arm_pool_q7_HWC.c

/*
 * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_pool_q7_HWC.c
 * Description:  Pooling function implementations
 *
 * $Date:        17. January 2018
 * $Revision:    V.1.0.0
 *
 * Target Processor:  Cortex-M cores
 *
 * -------------------------------------------------------------------- */
#include "arm_math.h"
#include "arm_nnfunctions.h"

#if defined (ARM_MATH_DSP)

/**
 * @brief A few utility functions used by pooling functions
 *
 */
/* Scale the q15 accumulator buffer back down to q7 and write the result to target */
static void buffer_scale_back_q15_to_q7(q15_t * buffer, q7_t * target, uint16_t length, uint16_t scale)
{
    int       i;

    for (i = 0; i < length; i++)
    {
        target[i] = (q7_t) (buffer[i] / scale);
    }
}
/* Element-wise maximum: base[i] = max(base[i], target[i]), processed four q7 values at a time */
static void compare_and_replace_if_larger_q7(q7_t * base,           // base data
                                             q7_t * target,         // compare target
                                             const uint16_t length  // data length
    )
{
    q7_t     *pIn = base;
    q7_t     *pCom = target;
    union arm_nnword in;
    union arm_nnword com;
    uint16_t  cnt = length >> 2;

    while (cnt > 0u)
    {
        in.word = *__SIMD32(pIn);
        com.word = *__SIMD32(pCom)++;

        /* byte-wise compare, keeping the larger value */
        if (com.bytes[0] > in.bytes[0])
            in.bytes[0] = com.bytes[0];
        if (com.bytes[1] > in.bytes[1])
            in.bytes[1] = com.bytes[1];
        if (com.bytes[2] > in.bytes[2])
            in.bytes[2] = com.bytes[2];
        if (com.bytes[3] > in.bytes[3])
            in.bytes[3] = com.bytes[3];

        *__SIMD32(pIn)++ = in.word;

        cnt--;
    }

    /* handle the 1 to 3 leftover bytes when length is not a multiple of 4 */
    cnt = length & 0x3;
    while (cnt > 0u)
    {
        if (*pCom > *pIn)
            *pIn = *pCom;
        pIn++;
        pCom++;
        cnt--;
    }
}
/* Widen four q7 values at a time to q15 and add them to the q15 accumulators in base */
static void accumulate_q7_to_q15(q15_t * base, q7_t * target, const uint16_t length)
{
    q15_t    *pCnt = base;
    q7_t     *pV = target;
    q31_t     v1, v2, vo1, vo2;
    uint16_t  cnt = length >> 2;
    q31_t     in;

    while (cnt > 0u)
    {
        q31_t     value = *__SIMD32(pV)++;

        /* sign-extend the four q7 bytes into two words of two q15 halfwords each */
        v1 = __SXTB16(__ROR(value, 8));
        v2 = __SXTB16(value);
#ifndef ARM_MATH_BIG_ENDIAN
        vo2 = __PKHTB(v1, v2, 16);
        vo1 = __PKHBT(v2, v1, 16);
#else
        vo1 = __PKHTB(v1, v2, 16);
        vo2 = __PKHBT(v2, v1, 16);
#endif

        /* saturating add of two q15 pairs into the accumulator */
        in = *__SIMD32(pCnt);
        *__SIMD32(pCnt)++ = __QADD16(vo1, in);

        in = *__SIMD32(pCnt);
        *__SIMD32(pCnt)++ = __QADD16(vo2, in);

        cnt--;
    }

    /* handle the 1 to 3 leftover values when length is not a multiple of 4 */
    cnt = length & 0x3;
    while (cnt > 0u)
    {
        *pCnt++ += *pV++;
        cnt--;
    }
}
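
/*
 * For reference, a scalar sketch of what the SIMD loop in
 * accumulate_q7_to_q15() computes (illustrative only, not compiled here):
 * each q7 element of target is sign-extended and added to the matching
 * q15 accumulator in base with saturation, which is the per-halfword
 * behaviour of __QADD16:
 *
 *   for (int i = 0; i < length; i++)
 *   {
 *       q31_t sum = (q31_t) base[i] + (q31_t) target[i];
 *       base[i] = (q15_t) __SSAT(sum, 16);
 *   }
 */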
#endif // ARM_MATH_DSP

/**
 * @ingroup groupNN
 */

/**
 * @addtogroup Pooling
 * @{
 */
/**
 * @brief Q7 max pooling function
 * @param[in,out]   Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   Im_out      pointer to output tensor
 * @return none.
 *
 * @details
 *
 * <b>Buffer size:</b>
 *
 * bufferA size:  0
 *
 * The pooling function is implemented as split x-pooling then
 * y-pooling.
 *
 * This pooling function is input-destructive. Input data is undefined
 * after calling this function.
 *
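 * @par Example
 * A minimal usage sketch (the dimensions below are illustrative
 * assumptions, not taken from this file); for a square input,
 * dim_im_out = (dim_im_in + 2 * padding - dim_kernel) / stride + 1.
 * Max pooling does not use bufferA (required size 0), so NULL can be
 * passed:
 * @code
 *   #define DIM_IN   32
 *   #define CH_IN     8
 *   #define KERNEL    2
 *   #define PAD       0
 *   #define STRIDE    2
 *   #define DIM_OUT  ((DIM_IN + 2 * PAD - KERNEL) / STRIDE + 1)
 *
 *   q7_t im_in[DIM_IN * DIM_IN * CH_IN];     // input, overwritten by the call
 *   q7_t im_out[DIM_OUT * DIM_OUT * CH_IN];  // pooled output
 *
 *   arm_maxpool_q7_HWC(im_in, DIM_IN, CH_IN, KERNEL, PAD, STRIDE,
 *                      DIM_OUT, NULL, im_out);
 * @endcode
 *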
 */
void
arm_maxpool_q7_HWC(q7_t * Im_in,
                   const uint16_t dim_im_in,
                   const uint16_t ch_im_in,
                   const uint16_t dim_kernel,
                   const uint16_t padding,
                   const uint16_t stride, const uint16_t dim_im_out, q7_t * bufferA, q7_t * Im_out)
{
#if defined (ARM_MATH_DSP)
    /* Run the following code for Cortex-M4 and Cortex-M7 */

    int16_t   i_x, i_y;

    /* first perform the pooling along the x axis */
    for (i_y = 0; i_y < dim_im_in; i_y++)
    {
        for (i_x = 0; i_x < dim_im_out; i_x++)
        {
            /* for each output pixel */
            q7_t     *target = Im_in + (i_y * dim_im_in + i_x) * ch_im_in;
            q7_t     *win_start;
            q7_t     *win_stop;

            if (i_x * stride - padding < 0)
            {
                win_start = target;
            } else
            {
                win_start = Im_in + (i_y * dim_im_in + i_x * stride - padding) * ch_im_in;
            }

            if (i_x * stride - padding + dim_kernel >= dim_im_in)
            {
                win_stop = Im_in + (i_y * dim_im_in + dim_im_in) * ch_im_in;
            } else
            {
                win_stop = Im_in + (i_y * dim_im_in + i_x * stride - padding + dim_kernel) * ch_im_in;
            }

            /* first step is to copy over the initial data */
            /* arm_copy_q7(win_start, target, ch_im_in); */
            memmove(target, win_start, ch_im_in);

            /* start the max operation from the second part */
            win_start += ch_im_in;
            for (; win_start < win_stop; win_start += ch_im_in)
            {
                compare_and_replace_if_larger_q7(target, win_start, ch_im_in);
            }
        }
    }

    /* then perform the pooling along the y axis */
    for (i_y = 0; i_y < dim_im_out; i_y++)
    {
        /* for each output row */
        q7_t     *target = Im_out + i_y * dim_im_out * ch_im_in;
        q7_t     *row_start;
        q7_t     *row_end;

        /* setting the starting row */
        if (i_y * stride - padding < 0)
        {
            row_start = Im_in;
        } else
        {
            row_start = Im_in + (i_y * stride - padding) * dim_im_in * ch_im_in;
        }

        /* setting the stopping row */
        if (i_y * stride - padding + dim_kernel >= dim_im_in)
        {
            row_end = Im_in + dim_im_in * dim_im_in * ch_im_in;
        } else
        {
            row_end = Im_in + (i_y * stride - padding + dim_kernel) * dim_im_in * ch_im_in;
        }

        /* copy over the first row */
        /* arm_copy_q7(row_start, target, dim_im_out * ch_im_in); */
        memmove(target, row_start, dim_im_out * ch_im_in);

        /* move over to the next row */
        row_start += ch_im_in * dim_im_in;
        for (; row_start < row_end; row_start += dim_im_in * ch_im_in)
        {
            compare_and_replace_if_larger_q7(target, row_start, dim_im_out * ch_im_in);
        }
    }
#else
    /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */
    int16_t   i_ch_in, i_x, i_y;
    int16_t   k_x, k_y;

    for (i_ch_in = 0; i_ch_in < ch_im_in; i_ch_in++)
    {
        for (i_y = 0; i_y < dim_im_out; i_y++)
        {
            for (i_x = 0; i_x < dim_im_out; i_x++)
            {
                int       max = -129;
                for (k_y = i_y * stride - padding; k_y < i_y * stride - padding + dim_kernel; k_y++)
                {
                    for (k_x = i_x * stride - padding; k_x < i_x * stride - padding + dim_kernel; k_x++)
                    {
                        if (k_y >= 0 && k_x >= 0 && k_y < dim_im_in && k_x < dim_im_in)
                        {
                            if (Im_in[i_ch_in + ch_im_in * (k_x + k_y * dim_im_in)] > max)
                            {
                                max = Im_in[i_ch_in + ch_im_in * (k_x + k_y * dim_im_in)];
                            }
                        }
                    }
                }
                Im_out[i_ch_in + ch_im_in * (i_x + i_y * dim_im_out)] = max;
            }
        }
    }

#endif /* ARM_MATH_DSP */
}
/**
 * @brief Q7 average pooling function
 * @param[in,out]   Im_in       pointer to input tensor
 * @param[in]       dim_im_in   input tensor dimension
 * @param[in]       ch_im_in    number of input tensor channels
 * @param[in]       dim_kernel  filter kernel size
 * @param[in]       padding     padding sizes
 * @param[in]       stride      convolution stride
 * @param[in]       dim_im_out  output tensor dimension
 * @param[in,out]   bufferA     pointer to buffer space for input
 * @param[in,out]   Im_out      pointer to output tensor
 * @return none.
 *
 * @details
 *
 * <b>Buffer size:</b>
 *
 * bufferA size:  2*dim_im_out*ch_im_in
 *
 * The pooling function is implemented as split x-pooling then
 * y-pooling.
 *
 * This pooling function is input-destructive. Input data is undefined
 * after calling this function.
 *
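 * @par Example
 * A minimal usage sketch (the dimensions below are illustrative
 * assumptions, not taken from this file). bufferA must provide
 * 2*dim_im_out*ch_im_in bytes, i.e. room for dim_im_out*ch_im_in
 * q15_t accumulators used by the DSP path:
 * @code
 *   #define DIM_IN   32
 *   #define CH_IN     8
 *   #define KERNEL    2
 *   #define PAD       0
 *   #define STRIDE    2
 *   #define DIM_OUT  ((DIM_IN + 2 * PAD - KERNEL) / STRIDE + 1)
 *
 *   q7_t im_in[DIM_IN * DIM_IN * CH_IN];      // input, overwritten by the call
 *   q7_t im_out[DIM_OUT * DIM_OUT * CH_IN];   // pooled output
 *   q7_t buffer_a[2 * DIM_OUT * CH_IN];       // q15 working buffer (DSP path)
 *
 *   arm_avepool_q7_HWC(im_in, DIM_IN, CH_IN, KERNEL, PAD, STRIDE,
 *                      DIM_OUT, buffer_a, im_out);
 * @endcode
 *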
 */
void
arm_avepool_q7_HWC(q7_t * Im_in,
                   const uint16_t dim_im_in,
                   const uint16_t ch_im_in,
                   const uint16_t dim_kernel,
                   const uint16_t padding,
                   const uint16_t stride, const uint16_t dim_im_out, q7_t * bufferA, q7_t * Im_out)
{
#if defined (ARM_MATH_DSP)
    /* Run the following code for Cortex-M4 and Cortex-M7 */

    q15_t    *buffer = (q15_t *) bufferA;
    int16_t   i_x, i_y;
    int16_t   count = 0;

    /* first perform the pooling along the x axis */
    for (i_y = 0; i_y < dim_im_in; i_y++)
    {
        for (i_x = 0; i_x < dim_im_out; i_x++)
        {
            /* for each output pixel */
            q7_t     *target = Im_in + (i_y * dim_im_in + i_x) * ch_im_in;
            q7_t     *win_start;
            q7_t     *win_stop;

            if (i_x * stride - padding < 0)
            {
                win_start = target;
            } else
            {
                win_start = Im_in + (i_y * dim_im_in + i_x * stride - padding) * ch_im_in;
            }

            if (i_x * stride - padding + dim_kernel >= dim_im_in)
            {
                win_stop = Im_in + (i_y * dim_im_in + dim_im_in) * ch_im_in;
            } else
            {
                win_stop = Im_in + (i_y * dim_im_in + i_x * stride - padding + dim_kernel) * ch_im_in;
            }

            /* first step is to copy over the initial data into the q15 buffer */
            arm_q7_to_q15_no_shift(win_start, buffer, ch_im_in);
            count = 1;

            /* start the accumulation from the second part */
            win_start += ch_im_in;
            for (; win_start < win_stop; win_start += ch_im_in)
            {
                accumulate_q7_to_q15(buffer, win_start, ch_im_in);
                count++;
            }

            /* divide by the window count and write back as q7 */
            buffer_scale_back_q15_to_q7(buffer, target, ch_im_in, count);
        }
    }

    /* then perform the pooling along the y axis */
    for (i_y = 0; i_y < dim_im_out; i_y++)
    {
        /* for each output row */
        q7_t     *target = Im_out + i_y * dim_im_out * ch_im_in;
        q7_t     *row_start;
        q7_t     *row_end;

        /* setting the starting row */
        if (i_y * stride - padding < 0)
        {
            row_start = Im_in;
        } else
        {
            row_start = Im_in + (i_y * stride - padding) * dim_im_in * ch_im_in;
        }

        /* setting the stopping row */
        if (i_y * stride - padding + dim_kernel >= dim_im_in)
        {
            row_end = Im_in + dim_im_in * dim_im_in * ch_im_in;
        } else
        {
            row_end = Im_in + (i_y * stride - padding + dim_kernel) * dim_im_in * ch_im_in;
        }

        /* copy over the first row into the q15 buffer */
        arm_q7_to_q15_no_shift(row_start, buffer, dim_im_out * ch_im_in);
        count = 1;

        /* move over to the next row */
        row_start += ch_im_in * dim_im_in;
        for (; row_start < row_end; row_start += dim_im_in * ch_im_in)
        {
            accumulate_q7_to_q15(buffer, row_start, dim_im_out * ch_im_in);
            count++;
        }

        /* divide by the window count and write back as q7 */
        buffer_scale_back_q15_to_q7(buffer, target, dim_im_out * ch_im_in, count);
    }
#else
    /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */
    int16_t   i_ch_in, i_x, i_y;
    int16_t   k_x, k_y;

    for (i_ch_in = 0; i_ch_in < ch_im_in; i_ch_in++)
    {
        for (i_y = 0; i_y < dim_im_out; i_y++)
        {
            for (i_x = 0; i_x < dim_im_out; i_x++)
            {
                int       sum = 0;
                int       count = 0;
                for (k_y = i_y * stride - padding; k_y < i_y * stride - padding + dim_kernel; k_y++)
                {
                    for (k_x = i_x * stride - padding; k_x < i_x * stride - padding + dim_kernel; k_x++)
                    {
                        if (k_y >= 0 && k_x >= 0 && k_y < dim_im_in && k_x < dim_im_in)
                        {
                            sum += Im_in[i_ch_in + ch_im_in * (k_x + k_y * dim_im_in)];
                            count++;
                        }
                    }
                }
                Im_out[i_ch_in + ch_im_in * (i_x + i_y * dim_im_out)] = sum / count;
            }
        }
    }

#endif /* ARM_MATH_DSP */
}
/**
 * @} end of Pooling group
 */