/**************************************************************************//**
 * @file     core_cmSimd.h
 * @brief    CMSIS Cortex-M SIMD Header File
 * @version  V4.00
 * @date     22. August 2014
 *
 * @note
 *
 ******************************************************************************/
/* Copyright (c) 2009 - 2014 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/
#if defined ( __ICCARM__ )
 #pragma system_include  /* treat file as system include file for MISRA check */
#endif

#ifndef __CORE_CMSIMD_H
#define __CORE_CMSIMD_H

#ifdef __cplusplus
 extern "C" {
#endif
/*******************************************************************************
 *                Hardware Abstraction Layer
 ******************************************************************************/

/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/
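
/* Usage sketch (illustrative, values assumed): each intrinsic below operates
   lane-wise on packed operands held in a 32-bit register. For example,
   __UADD8 performs four independent unsigned byte additions in parallel:

     uint32_t a = 0x01020304U;
     uint32_t b = 0x10203040U;
     uint32_t r = __UADD8(a, b);   // r == 0x11223344U, one add per byte lane
*/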
#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */
#define __SADD8                           __sadd8
#define __QADD8                           __qadd8
#define __SHADD8                          __shadd8
#define __UADD8                           __uadd8
#define __UQADD8                          __uqadd8
#define __UHADD8                          __uhadd8
#define __SSUB8                           __ssub8
#define __QSUB8                           __qsub8
#define __SHSUB8                          __shsub8
#define __USUB8                           __usub8
#define __UQSUB8                          __uqsub8
#define __UHSUB8                          __uhsub8
#define __SADD16                          __sadd16
#define __QADD16                          __qadd16
#define __SHADD16                         __shadd16
#define __UADD16                          __uadd16
#define __UQADD16                         __uqadd16
#define __UHADD16                         __uhadd16
#define __SSUB16                          __ssub16
#define __QSUB16                          __qsub16
#define __SHSUB16                         __shsub16
#define __USUB16                          __usub16
#define __UQSUB16                         __uqsub16
#define __UHSUB16                         __uhsub16
#define __SASX                            __sasx
#define __QASX                            __qasx
#define __SHASX                           __shasx
#define __UASX                            __uasx
#define __UQASX                           __uqasx
#define __UHASX                           __uhasx
#define __SSAX                            __ssax
#define __QSAX                            __qsax
#define __SHSAX                           __shsax
#define __USAX                            __usax
#define __UQSAX                           __uqsax
#define __UHSAX                           __uhsax
#define __USAD8                           __usad8
#define __USADA8                          __usada8
#define __SSAT16                          __ssat16
#define __USAT16                          __usat16
#define __UXTB16                          __uxtb16
#define __UXTAB16                         __uxtab16
#define __SXTB16                          __sxtb16
#define __SXTAB16                         __sxtab16
#define __SMUAD                           __smuad
#define __SMUADX                          __smuadx
#define __SMLAD                           __smlad
#define __SMLADX                          __smladx
#define __SMLALD                          __smlald
#define __SMLALDX                         __smlaldx
#define __SMUSD                           __smusd
#define __SMUSDX                          __smusdx
#define __SMLSD                           __smlsd
#define __SMLSDX                          __smlsdx
#define __SMLSLD                          __smlsld
#define __SMLSLDX                         __smlsldx
#define __SEL                             __sel
#define __QADD                            __qadd
#define __QSUB                            __qsub
#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

#define __SMMLA(ARG1,ARG2,ARG3)          ( (int32_t)((((int64_t)(ARG1) * (ARG2)) +          \
                                                      ((int64_t)(ARG3) << 32)      ) >> 32))
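
/* Worked example (illustrative, values assumed): __PKHBT keeps the bottom
   halfword of ARG1 and packs the shifted-up halfword of ARG2 into the top;
   __PKHTB keeps the top halfword of ARG1 and packs the shifted-down
   halfword of ARG2 into the bottom:

     __PKHBT(0x0000AAAAUL, 0x0000BBBBUL, 16)   // -> 0xBBBBAAAAUL
     __PKHTB(0xAAAA0000UL, 0xBBBB0000UL, 16)   // -> 0xAAAABBBBUL
*/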
#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
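
/* Note: every wrapper in this section follows the same inline-asm pattern.
   The "=r" constraint lets the compiler choose an output register, the "r"
   constraints pass the operands in registers, and "volatile" keeps the
   instruction from being optimized away.
*/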
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
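
/* Usage sketch (illustrative, values assumed): __USAD8 sums the absolute
   differences of the four byte lanes, and __USADA8 adds that sum to an
   accumulator:

     uint32_t sad = __USAD8(0x01050307U, 0x02030406U);
     // |1-2| + |5-3| + |3-4| + |7-6| = 5

     uint32_t acc = __USADA8(0x01050307U, 0x02030406U, 100U);   // acc == 105
*/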
#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
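
/* Note: ARG2 of __SSAT16/__USAT16 is passed with the "I" constraint, so it
   must be a compile-time constant; the saturation bit position is encoded
   in the instruction itself. Illustrative example (values assumed):

     uint32_t r = __SSAT16(0x7FFF0012U, 8);
     // each signed halfword is saturated to [-128, 127]: r == 0x007F0012U
*/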
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
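
/* Note: the llreg_u union maps the 64-bit accumulator onto the two 32-bit
   registers the instruction actually reads and writes; the __ARMEB__
   branches swap the word order so the register pairing stays correct on
   big-endian targets.
*/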
__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
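
/* Usage sketch (illustrative, values assumed): __SEL picks each byte lane
   from op1 or op2 according to the APSR.GE flags, which are set by a
   preceding SIMD instruction such as __USUB8. A per-byte maximum, for
   example:

     uint32_t a = 0x10405060U, b = 0x20304070U;
     (void)__USUB8(a, b);          // sets GE flags: 1 where a-byte >= b-byte
     uint32_t max = __SEL(a, b);   // max == 0x20405070U
*/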
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })
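
/* Note: the (ARG3 == 0) special case exists because PKHTB cannot encode
   "asr #0"; an arithmetic shift right of zero is expressed by omitting the
   shift operand altogether.
*/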
__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
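
/* Worked example (illustrative, values assumed): __SMMLA returns op3 plus
   the most significant 32 bits of the 64-bit product op1 * op2:

     int32_t r = __SMMLA(0x40000000, 0x40000000, 5);
     // (2^30 * 2^30) >> 32 == 2^28, so r == 0x10000005
*/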
#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */
#include <cmsis_iar.h>

#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */
#include <cmsis_ccs.h>

#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */
/* not yet supported */

#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
/* Cosmic specific functions */
#include <cmsis_csm.h>

#endif

/*@} end of group CMSIS_SIMD_intrinsics */
#ifdef __cplusplus
}
#endif

#endif /* __CORE_CMSIMD_H */