/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_mat_mult_fast_q15.c
 * Description:  Q15 matrix multiplication (fast variant)
 *
 * $Date:        27. January 2017
 * $Revision:    V.1.5.1
 *
 * Target Processor: Cortex-M cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arm_math.h"

/**
 * @ingroup groupMatrix
 */

/**
 * @addtogroup MatrixMult
 * @{
 */
/**
 * @brief Q15 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4
 * @param[in]  *pSrcA  points to the first input matrix structure
 * @param[in]  *pSrcB  points to the second input matrix structure
 * @param[out] *pDst   points to the output matrix structure
 * @param[in]  *pState points to the array for storing intermediate results
 * @return The function returns either
 * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
 *
 * @details
 * <b>Scaling and Overflow Behavior:</b>
 *
 * \par
 * The difference between the function arm_mat_mult_q15() and this fast variant is that
 * the fast variant uses a 32-bit rather than a 64-bit accumulator.
 * The result of each 1.15 x 1.15 multiplication is truncated to
 * 2.30 format. These intermediate results are accumulated in a 32-bit register in 2.30
 * format. Finally, the accumulator is saturated and converted to a 1.15 result.
 *
 * \par
 * The fast version has the same overflow behavior as the standard version but provides
 * less precision since it discards the low 16 bits of each multiplication result.
 * To avoid overflows completely, scale down one of the input matrices by
 * log2(numColsA) bits, since a total of numColsA additions are computed internally
 * for each output element.
 *
 * \par
 * See <code>arm_mat_mult_q15()</code> for a slower implementation of this function
 * which uses 64-bit accumulation to provide higher precision.
 */
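/*
 * Usage sketch (illustrative only, not part of the library): the buffer and
 * dimension names below are assumptions made for this example. Note that
 * pState must provide numRowsB * numColsB q15_t elements, because the
 * function first writes the transpose of pSrcB into that buffer.
 *
 *   #include "arm_math.h"
 *
 *   #define ROWS_A 4
 *   #define COLS_A 8            // must equal the number of rows of B
 *   #define COLS_B 4
 *
 *   static q15_t srcA[ROWS_A * COLS_A];
 *   static q15_t srcB[COLS_A * COLS_B];
 *   static q15_t dst[ROWS_A * COLS_B];
 *   static q15_t scratch[COLS_A * COLS_B];   // holds the transpose of B
 *
 *   void example_mat_mult(void)
 *   {
 *     arm_matrix_instance_q15 A, B, D;
 *
 *     arm_mat_init_q15(&A, ROWS_A, COLS_A, srcA);
 *     arm_mat_init_q15(&B, COLS_A, COLS_B, srcB);
 *     arm_mat_init_q15(&D, ROWS_A, COLS_B, dst);
 *
 *     // Optionally pre-scale one input by log2(COLS_A) = 3 bits so the
 *     // 32-bit accumulator cannot overflow.
 *     arm_shift_q15(srcA, -3, srcA, ROWS_A * COLS_A);
 *
 *     if (arm_mat_mult_fast_q15(&A, &B, &D, scratch) != ARM_MATH_SUCCESS)
 *     {
 *       // matrix dimensions did not match
 *     }
 *   }
 */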
arm_status arm_mat_mult_fast_q15(
  const arm_matrix_instance_q15 * pSrcA,
  const arm_matrix_instance_q15 * pSrcB,
  arm_matrix_instance_q15 * pDst,
  q15_t * pState)
{
  q31_t sum;                                     /* accumulator */
  q15_t *pSrcBT = pState;                        /* input data matrix pointer for transpose */
  q15_t *pInA = pSrcA->pData;                    /* input data matrix pointer A of Q15 type */
  q15_t *pInB = pSrcB->pData;                    /* input data matrix pointer B of Q15 type */
  q15_t *px;                                     /* Temporary output data matrix pointer */
  uint16_t numRowsA = pSrcA->numRows;            /* number of rows of input matrix A */
  uint16_t numColsB = pSrcB->numCols;            /* number of columns of input matrix B */
  uint16_t numColsA = pSrcA->numCols;            /* number of columns of input matrix A */
  uint16_t numRowsB = pSrcB->numRows;            /* number of rows of input matrix B */
  uint32_t col, i = 0U, row = numRowsB, colCnt;  /* loop counters */
  arm_status status;                             /* status of matrix multiplication */
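
  /* When UNALIGNED_SUPPORT_DISABLE is defined, the 32-bit (dual halfword) SIMD
   * accesses used below are replaced by plain 16-bit scalar accesses. */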
#ifndef UNALIGNED_SUPPORT_DISABLE

  q31_t in;                                      /* Temporary variable to hold the input value */
  q31_t inA1, inA2, inB1, inB2;
  q31_t sum2, sum3, sum4;
  q15_t *pInA2, *pInB2, *px2;
  uint32_t j = 0;

#else

  q15_t in;                                      /* Temporary variable to hold the input value */
  q15_t inA1, inA2, inB1, inB2;

#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
#ifdef ARM_MATH_MATRIX_CHECK

  /* Check for matrix mismatch condition */
  if ((pSrcA->numCols != pSrcB->numRows) ||
      (pSrcA->numRows != pDst->numRows) || (pSrcB->numCols != pDst->numCols))
  {
    /* Set status as ARM_MATH_SIZE_MISMATCH */
    status = ARM_MATH_SIZE_MISMATCH;
  }
  else
#endif
  {
    /* Transpose pSrcB into the pState buffer so that, during the multiplication
     * stage below, both operands can be read sequentially. */
    do
    {
      /* Apply loop unrolling and exchange the columns with row elements */
      col = numColsB >> 2;

      /* The pointer px is set to starting address of the column being processed */
      px = pSrcBT + i;

      /* First part of the processing with loop unrolling.  Compute 4 outputs at a time.
       ** A second loop below computes the remaining 1 to 3 samples. */
      while (col > 0U)
      {
#ifndef UNALIGNED_SUPPORT_DISABLE

        /* Read two elements from the row */
        in = *__SIMD32(pInB)++;

        /* Unpack and store one element in the destination */
#ifndef ARM_MATH_BIG_ENDIAN
        *px = (q15_t) in;
#else
        *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16);
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        /* Update the pointer px to point to the next row of the transposed matrix */
        px += numRowsB;

        /* Unpack and store the second element in the destination */
#ifndef ARM_MATH_BIG_ENDIAN
        *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16);
#else
        *px = (q15_t) in;
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        /* Update the pointer px to point to the next row of the transposed matrix */
        px += numRowsB;

        /* Read two elements from the row */
        in = *__SIMD32(pInB)++;

        /* Unpack and store one element in the destination */
#ifndef ARM_MATH_BIG_ENDIAN
        *px = (q15_t) in;
#else
        *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16);
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        /* Update the pointer px to point to the next row of the transposed matrix */
        px += numRowsB;

        /* Unpack and store the second element in the destination */
#ifndef ARM_MATH_BIG_ENDIAN
        *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16);
#else
        *px = (q15_t) in;
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

#else

        /* Read one element from the row */
        in = *pInB++;

        /* Store one element in the destination */
        *px = in;

        /* Update the pointer px to point to the next row of the transposed matrix */
        px += numRowsB;

        /* Read one element from the row */
        in = *pInB++;

        /* Store one element in the destination */
        *px = in;

        /* Update the pointer px to point to the next row of the transposed matrix */
        px += numRowsB;

        /* Read one element from the row */
        in = *pInB++;

        /* Store one element in the destination */
        *px = in;

        /* Update the pointer px to point to the next row of the transposed matrix */
        px += numRowsB;

        /* Read one element from the row */
        in = *pInB++;

        /* Store one element in the destination */
        *px = in;

#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */

        /* Update the pointer px to point to the next row of the transposed matrix */
        px += numRowsB;

        /* Decrement the column loop counter */
        col--;
      }
      /* If the number of columns of pSrcB is not a multiple of 4, compute any
       * remaining output samples here.  No loop unrolling is used. */
      col = numColsB % 0x4U;

      while (col > 0U)
      {
        /* Read and store the input element in the destination */
        *px = *pInB++;

        /* Update the pointer px to point to the next row of the transposed matrix */
        px += numRowsB;

        /* Decrement the column loop counter */
        col--;
      }

      i++;

      /* Decrement the row loop counter */
      row--;

    } while (row > 0U);
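
    /* At this point pSrcBT holds the transpose of pSrcB: numColsB rows of
     * numRowsB elements each, so each column of B can now be read as a
     * contiguous run of samples. */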
    /* Reset the variables for the usage in the following multiplication process */
    row = numRowsA;
    i = 0U;
    px = pDst->pData;

#ifndef UNALIGNED_SUPPORT_DISABLE
    /* Process two rows from matrix A at a time and output two rows at a time */
    row = row >> 1;
    px2 = px + numColsB;
#endif

    /* The following loop performs the dot-product of each row in pSrcA with each column in pSrcB */
    /* row loop */
    while (row > 0U)
    {
      /* For every row-wise process, the column loop counter is initialized */
      col = numColsB;

      /* For every row-wise process, the pInB pointer is set
       ** to the starting address of the transposed pSrcB data */
      pInB = pSrcBT;

#ifndef UNALIGNED_SUPPORT_DISABLE
      /* Process two (transposed) columns from matrix B at a time */
      col = col >> 1;
      j = 0;
#endif

      /* column loop */
      while (col > 0U)
      {
        /* Set the variable sum, that acts as accumulator, to zero */
        sum = 0;

        /* Initiate the pointer pInA to point to the starting address of the row being processed */
        pInA = pSrcA->pData + i;

#ifndef UNALIGNED_SUPPORT_DISABLE
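        /* sum, sum2, sum3 and sum4 accumulate a 2x2 tile of the output:
         * (current row of A, next row of A) x (current column of B, next column of B). */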
        sum2 = 0;
        sum3 = 0;
        sum4 = 0;
        pInB = pSrcBT + j;
        pInA2 = pInA + numColsA;
        pInB2 = pInB + numRowsB;

        /* Read in two elements at once - allows dual MAC instruction */
        colCnt = numColsA >> 1;
#else
        colCnt = numColsA >> 2;
#endif
        /* matrix multiplication */
        while (colCnt > 0U)
        {
          /* c(m,n) = a(m,1)*b(1,n) + a(m,2)*b(2,n) + ... + a(m,p)*b(p,n) */
#ifndef UNALIGNED_SUPPORT_DISABLE
          inA1 = *__SIMD32(pInA)++;
          inB1 = *__SIMD32(pInB)++;
          inA2 = *__SIMD32(pInA2)++;
          inB2 = *__SIMD32(pInB2)++;

          sum  = __SMLAD(inA1, inB1, sum);
          sum2 = __SMLAD(inA1, inB2, sum2);
          sum3 = __SMLAD(inA2, inB1, sum3);
          sum4 = __SMLAD(inA2, inB2, sum4);
#else
          inA1 = *pInA;
          inB1 = *pInB;
          sum += inA1 * inB1;

          inA2 = pInA[1];
          inB2 = pInB[1];
          sum += inA2 * inB2;

          inA1 = pInA[2];
          inB1 = pInB[2];
          sum += inA1 * inB1;

          inA2 = pInA[3];
          inB2 = pInB[3];
          sum += inA2 * inB2;

          pInA += 4;
          pInB += 4;
#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */

          /* Decrement the loop counter */
          colCnt--;
        }
        /* process odd column samples */
#ifndef UNALIGNED_SUPPORT_DISABLE
        if (numColsA & 1U)
        {
          inA1 = *pInA++;
          inB1 = *pInB++;
          inA2 = *pInA2++;
          inB2 = *pInB2++;

          sum  += inA1 * inB1;
          sum2 += inA1 * inB2;
          sum3 += inA2 * inB1;
          sum4 += inA2 * inB2;
        }
#else
        colCnt = numColsA % 0x4U;

        while (colCnt > 0U)
        {
          /* c(m,n) = a(m,1)*b(1,n) + a(m,2)*b(2,n) + ... + a(m,p)*b(p,n) */
          sum += (q31_t) (*pInA++) * (*pInB++);

          colCnt--;
        }
#endif
        /* Convert the accumulator(s) to 1.15 format and store in the destination buffer */
        *px++ = (q15_t) (sum >> 15);

#ifndef UNALIGNED_SUPPORT_DISABLE
        *px++  = (q15_t) (sum2 >> 15);
        *px2++ = (q15_t) (sum3 >> 15);
        *px2++ = (q15_t) (sum4 >> 15);
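        /* Advance j to the next pair of transposed columns of B (each column
         * occupies numRowsB elements in pSrcBT) */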
        j += numRowsB * 2;
#endif

        /* Decrement the column loop counter */
        col--;
      }

      i = i + numColsA;

#ifndef UNALIGNED_SUPPORT_DISABLE
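      /* Two rows of A were consumed, so advance i by one more row and move the
       * two output row pointers past the pair of rows just written (skipping the
       * last column when numColsB is odd; it is filled in separately below). */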
      i = i + numColsA;
      px = px2 + (numColsB & 1U);
      px2 = px + numColsB;
#endif

      /* Decrement the row loop counter */
      row--;
    }
    /* Compute any remaining odd row/column below */
#ifndef UNALIGNED_SUPPORT_DISABLE

    /* Compute remaining output column */
    if (numColsB & 1U)
    {
      /* Avoid redundant computation of last element */
      row = numRowsA & (~0x1);

      /* Point to remaining unfilled column in output matrix */
      px = pDst->pData + numColsB - 1;
      pInA = pSrcA->pData;

      /* row loop */
      while (row > 0)
      {
        /* point to last column in matrix B */
        pInB = pSrcBT + numRowsB * (numColsB - 1);

        /* Set the variable sum, that acts as accumulator, to zero */
        sum = 0;

        /* Compute 4 columns at once */
        colCnt = numColsA >> 2;

        /* matrix multiplication */
        while (colCnt > 0U)
        {
          inA1 = *__SIMD32(pInA)++;
          inA2 = *__SIMD32(pInA)++;
          inB1 = *__SIMD32(pInB)++;
          inB2 = *__SIMD32(pInB)++;

          sum = __SMLAD(inA1, inB1, sum);
          sum = __SMLAD(inA2, inB2, sum);

          /* Decrement the loop counter */
          colCnt--;
        }

        colCnt = numColsA & 3U;
        while (colCnt > 0U)
        {
          sum += (q31_t) (*pInA++) * (*pInB++);
          colCnt--;
        }

        /* Store the result in the destination buffer */
        *px = (q15_t) (sum >> 15);
        px += numColsB;

        /* Decrement the row loop counter */
        row--;
      }
    }
    /* Compute remaining output row */
    if (numRowsA & 1U)
    {
      /* point to last row in output matrix */
      px = pDst->pData + (numColsB) * (numRowsA - 1);

      pInB = pSrcBT;
      col = numColsB;
      i = 0U;

      /* col loop */
      while (col > 0)
      {
        /* point to last row in matrix A */
        pInA = pSrcA->pData + (numRowsA - 1) * numColsA;

        /* Set the variable sum, that acts as accumulator, to zero */
        sum = 0;

        /* Compute 4 columns at once */
        colCnt = numColsA >> 2;

        /* matrix multiplication */
        while (colCnt > 0U)
        {
          inA1 = *__SIMD32(pInA)++;
          inA2 = *__SIMD32(pInA)++;
          inB1 = *__SIMD32(pInB)++;
          inB2 = *__SIMD32(pInB)++;

          sum = __SMLAD(inA1, inB1, sum);
          sum = __SMLAD(inA2, inB2, sum);

          /* Decrement the loop counter */
          colCnt--;
        }

        colCnt = numColsA & 3U;
        while (colCnt > 0U)
        {
          sum += (q31_t) (*pInA++) * (*pInB++);
          colCnt--;
        }

        /* Store the result in the destination buffer */
        *px++ = (q15_t) (sum >> 15);

        /* Decrement the col loop counter */
        col--;
      }
    }

#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
    /* set status as ARM_MATH_SUCCESS */
    status = ARM_MATH_SUCCESS;
  }

  /* Return to application */
  return (status);
}

/**
 * @} end of MatrixMult group
 */