source: rtems/bsps/arm/include/cmsis_gcc.h @ 15359bb

Last change on this file since 15359bb was 15359bb, checked in by Sebastian Huber <sebastian.huber@…>, on 03/07/19 at 10:56:31

bsps/arm: Adjust CMSIS Doxygen groups

Update #3706.

1/**************************************************************************//**
2 * @file
3 * @brief    CMSIS Cortex-M Core Function/Instruction Header File
4 * @version  V4.30
5 * @date     20. October 2015
6 ******************************************************************************/
7/* Copyright (c) 2009 - 2015 ARM LIMITED
8
9   All rights reserved.
10   Redistribution and use in source and binary forms, with or without
11   modification, are permitted provided that the following conditions are met:
12   - Redistributions of source code must retain the above copyright
13     notice, this list of conditions and the following disclaimer.
14   - Redistributions in binary form must reproduce the above copyright
15     notice, this list of conditions and the following disclaimer in the
16     documentation and/or other materials provided with the distribution.
17   - Neither the name of ARM nor the names of its contributors may be used
18     to endorse or promote products derived from this software without
19     specific prior written permission.
20   *
21   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
25   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31   POSSIBILITY OF SUCH DAMAGE.
32   ---------------------------------------------------------------------------*/
33
34
35#ifndef __CMSIS_GCC_H
36#define __CMSIS_GCC_H
37
38/* ignore some GCC warnings */
39#if defined ( __GNUC__ )
40#pragma GCC diagnostic push
41#pragma GCC diagnostic ignored "-Wsign-conversion"
42#pragma GCC diagnostic ignored "-Wconversion"
43#pragma GCC diagnostic ignored "-Wunused-parameter"
44#endif
45
46
47/* ###########################  Core Function Access  ########################### */
48/** \ingroup  CMSIS_Core_FunctionInterface
49    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
50  @{
51 */
52
53/**
54  \brief   Enable IRQ Interrupts
55  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
56           Can only be executed in Privileged modes.
57 */
58__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
59{
60  __ASM volatile ("cpsie i" : : : "memory");
61}
62
63
64/**
65  \brief   Disable IRQ Interrupts
66  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
67  Can only be executed in Privileged modes.
68 */
69__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)
70{
71  __ASM volatile ("cpsid i" : : : "memory");
72}
73
74
75/**
76  \brief   Get Control Register
77  \details Returns the content of the Control Register.
78  \return               Control Register value
79 */
80__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)
81{
82  uint32_t result;
83
84  __ASM volatile ("MRS %0, control" : "=r" (result) );
85  return(result);
86}
87
88
89/**
90  \brief   Set Control Register
91  \details Writes the given value to the Control Register.
92  \param [in]    control  Control Register value to set
93 */
94__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)
95{
96  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
97}
98
99
100/**
101  \brief   Get IPSR Register
102  \details Returns the content of the IPSR Register.
103  \return               IPSR Register value
104 */
105__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void)
106{
107  uint32_t result;
108
109  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
110  return(result);
111}
112
113
114/**
115  \brief   Get APSR Register
116  \details Returns the content of the APSR Register.
117  \return               APSR Register value
118 */
119__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
120{
121  uint32_t result;
122
123  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
124  return(result);
125}
126
127
128/**
129  \brief   Get xPSR Register
130  \details Returns the content of the xPSR Register.
131
132  \return               xPSR Register value
133 */
134__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void)
135{
136  uint32_t result;
137
138  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
139  return(result);
140}
141
142
143/**
144  \brief   Get Process Stack Pointer
145  \details Returns the current value of the Process Stack Pointer (PSP).
146  \return               PSP Register value
147 */
148__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)
149{
150  register uint32_t result;
151
152  __ASM volatile ("MRS %0, psp\n"  : "=r" (result) );
153  return(result);
154}
155
156
157/**
158  \brief   Set Process Stack Pointer
159  \details Assigns the given value to the Process Stack Pointer (PSP).
160  \param [in]    topOfProcStack  Process Stack Pointer value to set
161 */
162__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
163{
164  __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");
165}
166
167
168/**
169  \brief   Get Main Stack Pointer
170  \details Returns the current value of the Main Stack Pointer (MSP).
171  \return               MSP Register value
172 */
173__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)
174{
175  register uint32_t result;
176
177  __ASM volatile ("MRS %0, msp\n" : "=r" (result) );
178  return(result);
179}
180
181
182/**
183  \brief   Set Main Stack Pointer
184  \details Assigns the given value to the Main Stack Pointer (MSP).
185
186  \param [in]    topOfMainStack  Main Stack Pointer value to set
187 */
188__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
189{
190  __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");
191}
192
193
194/**
195  \brief   Get Priority Mask
196  \details Returns the current state of the priority mask bit from the Priority Mask Register.
197  \return               Priority Mask value
198 */
199__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)
200{
201  uint32_t result;
202
203  __ASM volatile ("MRS %0, primask" : "=r" (result) );
204  return(result);
205}
206
207
208/**
209  \brief   Set Priority Mask
210  \details Assigns the given value to the Priority Mask Register.
211  \param [in]    priMask  Priority Mask
212 */
213__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
214{
215  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
216}
217
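/* Usage sketch (illustrative, not part of the original header): a minimal
   interrupt-disable critical section built from the PRIMASK accessors above;
   assumes the caller runs in a Privileged mode.

     uint32_t primask = __get_PRIMASK();   -- remember the current mask state
     __disable_irq();                      -- set PRIMASK, mask all IRQs
     ...short critical section...
     __set_PRIMASK(primask);               -- restore the previous mask state
 */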
218
219#if       (__CORTEX_M >= 0x03U)
220
221/**
222  \brief   Enable FIQ
223  \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
224           Can only be executed in Privileged modes.
225 */
226__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
227{
228  __ASM volatile ("cpsie f" : : : "memory");
229}
230
231
232/**
233  \brief   Disable FIQ
234  \details Disables FIQ interrupts by setting the F-bit in the CPSR.
235           Can only be executed in Privileged modes.
236 */
237__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)
238{
239  __ASM volatile ("cpsid f" : : : "memory");
240}
241
242
243/**
244  \brief   Get Base Priority
245  \details Returns the current value of the Base Priority register.
246  \return               Base Priority register value
247 */
248__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)
249{
250  uint32_t result;
251
252  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
253  return(result);
254}
255
256
257/**
258  \brief   Set Base Priority
259  \details Assigns the given value to the Base Priority register.
260  \param [in]    value  Base Priority value to set
261 */
262__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
263{
264  __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
265}
266
267
268/**
269  \brief   Set Base Priority with condition
270  \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
271           or the new value increases the BASEPRI priority level.
272  \param [in]    value  Base Priority value to set
273 */
274__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)
275{
276  __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");
277}
278
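/* Usage sketch (illustrative, not part of the original header): masking only
   interrupts at or below a given priority with BASEPRI. The value is assumed
   to be already shifted into the implemented priority bits, e.g.
   (prio << (8U - __NVIC_PRIO_BITS)) on devices defining __NVIC_PRIO_BITS.

     uint32_t basepri = __get_BASEPRI();   -- save the current setting
     __set_BASEPRI_MAX(0x40U);             -- raise the mask, never lower it
     ...critical section against interrupts with priority value >= 0x40...
     __set_BASEPRI(basepri);               -- restore the previous setting
 */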
279
280/**
281  \brief   Get Fault Mask
282  \details Returns the current value of the Fault Mask register.
283  \return               Fault Mask register value
284 */
285__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
286{
287  uint32_t result;
288
289  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
290  return(result);
291}
292
293
294/**
295  \brief   Set Fault Mask
296  \details Assigns the given value to the Fault Mask register.
297  \param [in]    faultMask  Fault Mask value to set
298 */
299__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
300{
301  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
302}
303
304#endif /* (__CORTEX_M >= 0x03U) */
305
306
307#if       (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U)
308
309/**
310  \brief   Get FPSCR
311  \details Returns the current value of the Floating Point Status/Control register.
312  \return               Floating Point Status/Control register value
313 */
314__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
315{
316#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
317  uint32_t result;
318
319  /* Empty asm statement works as a scheduling barrier */
320  __ASM volatile ("");
321  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
322  __ASM volatile ("");
323  return(result);
324#else
325   return(0);
326#endif
327}
328
329
330/**
331  \brief   Set FPSCR
332  \details Assigns the given value to the Floating Point Status/Control register.
333  \param [in]    fpscr  Floating Point Status/Control value to set
334 */
335__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
336{
337#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
338  /* Empty asm statement works as a scheduling barrier */
339  __ASM volatile ("");
340  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
341  __ASM volatile ("");
342#endif
343}
344
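/* Usage sketch (illustrative, not part of the original header): clearing the
   cumulative floating-point exception flags, which live in the low bits of
   FPSCR (IOC, DZC, OFC, UFC, IXC, IDC); only meaningful when the FPU is
   present and enabled.

     uint32_t fpscr = __get_FPSCR();
     __set_FPSCR(fpscr & ~0xFFU);   -- clear the cumulative exception bits
 */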
345#endif /* (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U) */
346
347
348
349/*@} end of CMSIS_Core_RegAccFunctions */
350
351
352/* ##########################  Core Instruction Access  ######################### */
353/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
354  \ingroup CMSIS
355  Access to dedicated instructions
356  @{
357*/
358
359/* Define macros for porting to both thumb1 and thumb2.
360 * For thumb1, use low register (r0-r7), specified by constraint "l"
361 * Otherwise, use general registers, specified by constraint "r" */
362#if defined (__thumb__) && !defined (__thumb2__)
363#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
364#define __CMSIS_GCC_USE_REG(r) "l" (r)
365#else
366#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
367#define __CMSIS_GCC_USE_REG(r) "r" (r)
368#endif
369
370/**
371  \brief   No Operation
372  \details No Operation does nothing. This instruction can be used for code alignment purposes.
373 */
374__attribute__((always_inline)) __STATIC_INLINE void __NOP(void)
375{
376  __ASM volatile ("nop");
377}
378
379
380/**
381  \brief   Wait For Interrupt
382  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
383 */
384__attribute__((always_inline)) __STATIC_INLINE void __WFI(void)
385{
386  __ASM volatile ("wfi");
387}
388
389
390/**
391  \brief   Wait For Event
392  \details Wait For Event is a hint instruction that permits the processor to enter
393    a low-power state until one of a number of events occurs.
394 */
395__attribute__((always_inline)) __STATIC_INLINE void __WFE(void)
396{
397  __ASM volatile ("wfe");
398}
399
400
401/**
402  \brief   Send Event
403  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
404 */
405__attribute__((always_inline)) __STATIC_INLINE void __SEV(void)
406{
407  __ASM volatile ("sev");
408}
409
410
411/**
412  \brief   Instruction Synchronization Barrier
413  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
414           so that all instructions following the ISB are fetched from cache or memory,
415           after the instruction has been completed.
416 */
417__attribute__((always_inline)) __STATIC_INLINE void __ISB(void)
418{
419  __ASM volatile ("isb 0xF":::"memory");
420}
421
422
423/**
424  \brief   Data Synchronization Barrier
425  \details Acts as a special kind of Data Memory Barrier.
426           It completes when all explicit memory accesses before this instruction complete.
427 */
428__attribute__((always_inline)) __STATIC_INLINE void __DSB(void)
429{
430  __ASM volatile ("dsb 0xF":::"memory");
431}
432
433
434/**
435  \brief   Data Memory Barrier
436  \details Ensures the apparent order of the explicit memory operations before
437           and after the instruction, without ensuring their completion.
438 */
439__attribute__((always_inline)) __STATIC_INLINE void __DMB(void)
440{
441  __ASM volatile ("dmb 0xF":::"memory");
442}
443
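/* Usage sketch (illustrative, not part of the original header): a typical
   ordering pattern after reconfiguring the system, here a write through an
   assumed volatile pointer ctrl_reg:

     *ctrl_reg = new_value;
     __DSB();   -- wait until the write has completed
     __ISB();   -- refetch the following instructions
 */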
444
445/**
446  \brief   Reverse byte order (32 bit)
447  \details Reverses the byte order in integer value.
448  \param [in]    value  Value to reverse
449  \return               Reversed value
450 */
451__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)
452{
453#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
454  return __builtin_bswap32(value);
455#else
456  uint32_t result;
457
458  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
459  return(result);
460#endif
461}
462
463
464/**
465  \brief   Reverse byte order (16 bit)
466  \details Reverses the byte order in two unsigned short values.
467  \param [in]    value  Value to reverse
468  \return               Reversed value
469 */
470__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
471{
472  uint32_t result;
473
474  __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
475  return(result);
476}
477
478
479/**
480  \brief   Reverse byte order in signed short value
481  \details Reverses the byte order in a signed short value with sign extension to integer.
482  \param [in]    value  Value to reverse
483  \return               Reversed value
484 */
485__attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
486{
487#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
488  return (short)__builtin_bswap16(value);
489#else
490  int32_t result;
491
492  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
493  return(result);
494#endif
495}
496
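/* Illustrative values (not part of the original header):

     __REV  (0x12345678U) == 0x78563412U           -- whole-word byte reversal
     __REV16(0x12345678U) == 0x34127856U           -- byte reversal within each halfword
     __REVSH(0x00000080)  == (int32_t)0xFFFF8000   -- low halfword reversed, then sign-extended
 */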
497
498/**
499  \brief   Rotate Right in unsigned value (32 bit)
500  \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits.
501  \param [in]    op1  Value to rotate
502  \param [in]    op2  Number of bits to rotate
503  \return               Rotated value
504 */
505__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
506{
507  return (op1 >> op2) | (op1 << (32U - op2));
508}
509
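/* Illustrative value (not part of the original header); op2 is assumed to be
   in the range 1..31, since a shift by 32 in the C expression above would be
   undefined:

     __ROR(0x80000001U, 1U) == 0xC0000000U
 */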
510
511/**
512  \brief   Breakpoint
513  \details Causes the processor to enter Debug state.
514           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
515  \param [in]    value  is ignored by the processor.
516                 If required, a debugger can use it to store additional information about the breakpoint.
517 */
518#define __BKPT(value)                       __ASM volatile ("bkpt "#value)
519
520
521/**
522  \brief   Reverse bit order of value
523  \details Reverses the bit order of the given value.
524  \param [in]    value  Value to reverse
525  \return               Reversed value
526 */
527__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
528{
529  uint32_t result;
530
531#if       (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
532   __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
533#else
534  int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */
535
536  result = value;                      /* r will be reversed bits of v; first get LSB of v */
537  for (value >>= 1U; value; value >>= 1U)
538  {
539    result <<= 1U;
540    result |= value & 1U;
541    s--;
542  }
543  result <<= s;                        /* shift when v's highest bits are zero */
544#endif
545  return(result);
546}
547
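/* Illustrative value (not part of the original header):

     __RBIT(0x00000001U) == 0x80000000U   -- bit 0 moves to bit 31
 */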
548
549/**
550  \brief   Count leading zeros
551  \details Counts the number of leading zeros of a data value.
552  \param [in]  value  Value to count the leading zeros
553  \return             number of leading zeros in value
554 */
555#define __CLZ             __builtin_clz
556
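/* Illustrative values (not part of the original header):

     __CLZ(0x00000001U) == 31
     __CLZ(0x80000000U) == 0

   Note that __builtin_clz(0) is undefined in C, whereas the CLZ instruction
   returns 32, so an argument of 0 is best avoided with this mapping. */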
557
558#if       (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
559
560/**
561  \brief   LDR Exclusive (8 bit)
562  \details Executes an exclusive LDR instruction for an 8 bit value.
563  \param [in]    ptr  Pointer to data
564  \return             value of type uint8_t at (*ptr)
565 */
566__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
567{
568    uint32_t result;
569
570#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
571   __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
572#else
573    /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
574       accepted by the assembler, so the following less efficient pattern has to be used.
575    */
576   __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
577#endif
578   return ((uint8_t) result);    /* Add explicit type cast here */
579}
580
581
582/**
583  \brief   LDR Exclusive (16 bit)
584  \details Executes an exclusive LDR instruction for 16 bit values.
585  \param [in]    ptr  Pointer to data
586  \return        value of type uint16_t at (*ptr)
587 */
588__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
589{
590    uint32_t result;
591
592#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
593   __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
594#else
595    /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
596       accepted by the assembler, so the following less efficient pattern has to be used.
597    */
598   __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
599#endif
600   return ((uint16_t) result);    /* Add explicit type cast here */
601}
602
603
604/**
605  \brief   LDR Exclusive (32 bit)
606  \details Executes an exclusive LDR instruction for 32 bit values.
607  \param [in]    ptr  Pointer to data
608  \return        value of type uint32_t at (*ptr)
609 */
610__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
611{
612    uint32_t result;
613
614   __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
615   return(result);
616}
617
618
619/**
620  \brief   STR Exclusive (8 bit)
621  \details Executes an exclusive STR instruction for 8 bit values.
622  \param [in]  value  Value to store
623  \param [in]    ptr  Pointer to location
624  \return          0  Function succeeded
625  \return          1  Function failed
626 */
627__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
628{
629   uint32_t result;
630
631   __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
632   return(result);
633}
634
635
636/**
637  \brief   STR Exclusive (16 bit)
638  \details Executes an exclusive STR instruction for 16 bit values.
639  \param [in]  value  Value to store
640  \param [in]    ptr  Pointer to location
641  \return          0  Function succeeded
642  \return          1  Function failed
643 */
644__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
645{
646   uint32_t result;
647
648   __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
649   return(result);
650}
651
652
653/**
654  \brief   STR Exclusive (32 bit)
655  \details Executes an exclusive STR instruction for 32 bit values.
656  \param [in]  value  Value to store
657  \param [in]    ptr  Pointer to location
658  \return          0  Function succeeded
659  \return          1  Function failed
660 */
661__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
662{
663   uint32_t result;
664
665   __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
666   return(result);
667}
668
669
670/**
671  \brief   Remove the exclusive lock
672  \details Removes the exclusive lock which is created by LDREX.
673 */
674__attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)
675{
676  __ASM volatile ("clrex" ::: "memory");
677}
678
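/* Usage sketch (illustrative, not part of the original header): a lock-free
   increment of an assumed shared variable "counter" of type volatile uint32_t,
   built from the exclusive access pair above.

     uint32_t val;
     do {
       val = __LDREXW(&counter) + 1U;            -- load-exclusive, increment
     } while (__STREXW(val, &counter) != 0U);    -- retry if the reservation was lost
 */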
679
680/**
681  \brief   Signed Saturate
682  \details Saturates a signed value.
683  \param [in]  value  Value to be saturated
684  \param [in]    sat  Bit position to saturate to (1..32)
685  \return             Saturated value
686 */
687#define __SSAT(ARG1,ARG2) \
688({                          \
689  uint32_t __RES, __ARG1 = (ARG1); \
690  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
691  __RES; \
692 })
693
694
695/**
696  \brief   Unsigned Saturate
697  \details Saturates an unsigned value.
698  \param [in]  value  Value to be saturated
699  \param [in]    sat  Bit position to saturate to (0..31)
700  \return             Saturated value
701 */
702#define __USAT(ARG1,ARG2) \
703({                          \
704  uint32_t __RES, __ARG1 = (ARG1); \
705  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
706  __RES; \
707 })
708
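/* Illustrative values (not part of the original header); the second argument
   is the saturation bit position and must be a compile-time constant:

     __SSAT( 200, 8) ==  127   -- clamped to the signed 8-bit range -128..127
     __SSAT(-200, 8) == -128
     __USAT( 300, 8) ==  255   -- clamped to the unsigned 8-bit range 0..255
 */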
709
710/**
711  \brief   Rotate Right with Extend (32 bit)
712  \details Moves each bit of a bitstring right by one bit.
713           The carry input is shifted in at the left end of the bitstring.
714  \param [in]    value  Value to rotate
715  \return               Rotated value
716 */
717__attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
718{
719  uint32_t result;
720
721  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
722  return(result);
723}
724
725
726/**
727  \brief   LDRT Unprivileged (8 bit)
728  \details Executes an Unprivileged LDRT instruction for an 8 bit value.
729  \param [in]    ptr  Pointer to data
730  \return             value of type uint8_t at (*ptr)
731 */
732__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
733{
734    uint32_t result;
735
736#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
737   __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );
738#else
739    /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
740       accepted by the assembler, so the following less efficient pattern has to be used.
741    */
742   __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
743#endif
744   return ((uint8_t) result);    /* Add explicit type cast here */
745}
746
747
748/**
749  \brief   LDRT Unprivileged (16 bit)
750  \details Executes an Unprivileged LDRT instruction for 16 bit values.
751  \param [in]    ptr  Pointer to data
752  \return        value of type uint16_t at (*ptr)
753 */
754__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
755{
756    uint32_t result;
757
758#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
759   __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );
760#else
761    /* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which is not
762       accepted by the assembler, so the following less efficient pattern has to be used.
763    */
764   __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
765#endif
766   return ((uint16_t) result);    /* Add explicit type cast here */
767}
768
769
770/**
771  \brief   LDRT Unprivileged (32 bit)
772  \details Executes an Unprivileged LDRT instruction for 32 bit values.
773  \param [in]    ptr  Pointer to data
774  \return        value of type uint32_t at (*ptr)
775 */
776__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
777{
778    uint32_t result;
779
780   __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );
781   return(result);
782}
783
784
785/**
786  \brief   STRT Unprivileged (8 bit)
787  \details Executes an Unprivileged STRT instruction for 8 bit values.
788  \param [in]  value  Value to store
789  \param [in]    ptr  Pointer to location
790 */
791__attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
792{
793   __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
794}
795
796
797/**
798  \brief   STRT Unprivileged (16 bit)
799  \details Executes an Unprivileged STRT instruction for 16 bit values.
800  \param [in]  value  Value to store
801  \param [in]    ptr  Pointer to location
802 */
803__attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
804{
805   __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
806}
807
808
809/**
810  \brief   STRT Unprivileged (32 bit)
811  \details Executes an Unprivileged STRT instruction for 32 bit values.
812  \param [in]  value  Value to store
813  \param [in]    ptr  Pointer to location
814 */
815__attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
816{
817   __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );
818}
819
820#endif /* (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) */
821
822/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
823
824
825/* ###################  Compiler specific Intrinsics  ########################### */
826/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
827  \ingroup CMSIS
828  Access to dedicated SIMD instructions
829  @{
830*/
831
832#if (__CORTEX_M >= 0x04U)  /* only for Cortex-M4 and above */
833
834__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
835{
836  uint32_t result;
837
838  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
839  return(result);
840}
841
842__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
843{
844  uint32_t result;
845
846  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
847  return(result);
848}
849
850__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
851{
852  uint32_t result;
853
854  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
855  return(result);
856}
857
858__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
859{
860  uint32_t result;
861
862  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
863  return(result);
864}
865
866__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
867{
868  uint32_t result;
869
870  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
871  return(result);
872}
873
874__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
875{
876  uint32_t result;
877
878  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
879  return(result);
880}
881
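/* Illustrative values (not part of the original header); each of these
   byte-wise intrinsics operates on four packed 8-bit lanes in parallel:

     __SADD8 (0x7F7F7F7FU, 0x01010101U) == 0x80808080U   -- wrapping signed add
     __QADD8 (0x7F7F7F7FU, 0x01010101U) == 0x7F7F7F7FU   -- saturating signed add
     __UQADD8(0xFFFF0000U, 0x01010101U) == 0xFFFF0101U   -- saturating unsigned add
 */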
882
883__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
884{
885  uint32_t result;
886
887  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
888  return(result);
889}
890
891__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
892{
893  uint32_t result;
894
895  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
896  return(result);
897}
898
899__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
900{
901  uint32_t result;
902
903  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
904  return(result);
905}
906
907__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
908{
909  uint32_t result;
910
911  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
912  return(result);
913}
914
915__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
916{
917  uint32_t result;
918
919  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
920  return(result);
921}
922
923__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
924{
925  uint32_t result;
926
927  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
928  return(result);
929}
930
931
932__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
933{
934  uint32_t result;
935
936  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
937  return(result);
938}
939
940__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
941{
942  uint32_t result;
943
944  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
945  return(result);
946}
947
948__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
949{
950  uint32_t result;
951
952  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
953  return(result);
954}
955
956__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
957{
958  uint32_t result;
959
960  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
961  return(result);
962}
963
964__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
965{
966  uint32_t result;
967
968  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
969  return(result);
970}
971
972__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
973{
974  uint32_t result;
975
976  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
977  return(result);
978}
979
980__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
981{
982  uint32_t result;
983
984  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
985  return(result);
986}
987
988__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
989{
990  uint32_t result;
991
992  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
993  return(result);
994}
995
996__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
997{
998  uint32_t result;
999
1000  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1001  return(result);
1002}
1003
1004__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
1005{
1006  uint32_t result;
1007
1008  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1009  return(result);
1010}
1011
1012__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
1013{
1014  uint32_t result;
1015
1016  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1017  return(result);
1018}
1019
1020__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
1021{
1022  uint32_t result;
1023
1024  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1025  return(result);
1026}
1027
1028__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
1029{
1030  uint32_t result;
1031
1032  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1033  return(result);
1034}
1035
1036__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
1037{
1038  uint32_t result;
1039
1040  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1041  return(result);
1042}
1043
1044__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
1045{
1046  uint32_t result;
1047
1048  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1049  return(result);
1050}
1051
1052__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
1053{
1054  uint32_t result;
1055
1056  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1057  return(result);
1058}
1059
1060__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
1061{
1062  uint32_t result;
1063
1064  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1065  return(result);
1066}
1067
1068__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
1069{
1070  uint32_t result;
1071
1072  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1073  return(result);
1074}
1075
1076__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
1077{
1078  uint32_t result;
1079
1080  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1081  return(result);
1082}
1083
1084__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
1085{
1086  uint32_t result;
1087
1088  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1089  return(result);
1090}
1091
1092__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
1093{
1094  uint32_t result;
1095
1096  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1097  return(result);
1098}
1099
1100__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
1101{
1102  uint32_t result;
1103
1104  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1105  return(result);
1106}
1107
1108__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
1109{
1110  uint32_t result;
1111
1112  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1113  return(result);
1114}
1115
1116__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
1117{
1118  uint32_t result;
1119
1120  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1121  return(result);
1122}
1123
1124__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
1125{
1126  uint32_t result;
1127
1128  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1129  return(result);
1130}
1131
1132__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
1133{
1134  uint32_t result;
1135
1136  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1137  return(result);
1138}
1139
1140#define __SSAT16(ARG1,ARG2) \
1141({                          \
1142  int32_t __RES, __ARG1 = (ARG1); \
1143  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
1144  __RES; \
1145 })
1146
1147#define __USAT16(ARG1,ARG2) \
1148({                          \
1149  uint32_t __RES, __ARG1 = (ARG1); \
1150  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
1151  __RES; \
1152 })
1153
1154__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
1155{
1156  uint32_t result;
1157
1158  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
1159  return(result);
1160}
1161
1162__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
1163{
1164  uint32_t result;
1165
1166  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1167  return(result);
1168}
1169
1170__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
1171{
1172  uint32_t result;
1173
1174  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
1175  return(result);
1176}
1177
1178__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
1179{
1180  uint32_t result;
1181
1182  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1183  return(result);
1184}
1185
1186__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
1187{
1188  uint32_t result;
1189
1190  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1191  return(result);
1192}
1193
1194__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
1195{
1196  uint32_t result;
1197
1198  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1199  return(result);
1200}
1201
1202__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
1203{
1204  uint32_t result;
1205
1206  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1207  return(result);
1208}
1209
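/* Usage sketch (illustrative, not part of the original header): __SMLAD views
   each operand as two packed signed halfwords, which makes it a natural fit
   for fixed-point dot products. Assuming x and y each hold two int16_t
   samples:

     acc = __SMLAD(x, y, acc);   -- acc += x.lo * y.lo + x.hi * y.hi
 */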
1210__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
1211{
1212  uint32_t result;
1213
1214  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1215  return(result);
1216}
1217
1218__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
1219{
1220  union llreg_u{
1221    uint32_t w32[2];
1222    uint64_t w64;
1223  } llr;
1224  llr.w64 = acc;
1225
1226#ifndef __ARMEB__   /* Little endian */
1227  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1228#else               /* Big endian */
1229  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1230#endif
1231
1232  return(llr.w64);
1233}
1234
1235__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
1236{
1237  union llreg_u{
1238    uint32_t w32[2];
1239    uint64_t w64;
1240  } llr;
1241  llr.w64 = acc;
1242
1243#ifndef __ARMEB__   /* Little endian */
1244  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1245#else               /* Big endian */
1246  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1247#endif
1248
1249  return(llr.w64);
1250}
1251
1252__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
1253{
1254  uint32_t result;
1255
1256  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1257  return(result);
1258}
1259
1260__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
1261{
1262  uint32_t result;
1263
1264  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1265  return(result);
1266}
1267
1268__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
1269{
1270  uint32_t result;
1271
1272  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1273  return(result);
1274}
1275
1276__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
1277{
1278  uint32_t result;
1279
1280  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
1281  return(result);
1282}
1283
1284__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
1285{
1286  union llreg_u{
1287    uint32_t w32[2];
1288    uint64_t w64;
1289  } llr;
1290  llr.w64 = acc;
1291
1292#ifndef __ARMEB__   /* Little endian */
1293  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1294#else               /* Big endian */
1295  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1296#endif
1297
1298  return(llr.w64);
1299}
1300
1301__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
1302{
1303  union llreg_u{
1304    uint32_t w32[2];
1305    uint64_t w64;
1306  } llr;
1307  llr.w64 = acc;
1308
1309#ifndef __ARMEB__   /* Little endian */
1310  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
1311#else               /* Big endian */
1312  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
1313#endif
1314
1315  return(llr.w64);
1316}
1317
1318__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
1319{
1320  uint32_t result;
1321
1322  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1323  return(result);
1324}
1325
1326__attribute__( ( always_inline ) ) __STATIC_INLINE  int32_t __QADD( int32_t op1,  int32_t op2)
1327{
1328  int32_t result;
1329
1330  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1331  return(result);
1332}
1333
1334__attribute__( ( always_inline ) ) __STATIC_INLINE  int32_t __QSUB( int32_t op1,  int32_t op2)
1335{
1336  int32_t result;
1337
1338  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1339  return(result);
1340}
1341
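/* Illustrative values (not part of the original header):

     __QADD(INT32_MAX, 1) == INT32_MAX   -- saturates instead of wrapping
     __QSUB(INT32_MIN, 1) == INT32_MIN
 */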
1342#define __PKHBT(ARG1,ARG2,ARG3) \
1343({                          \
1344  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
1345  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
1346  __RES; \
1347 })
1348
1349#define __PKHTB(ARG1,ARG2,ARG3) \
1350({                          \
1351  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
1352  if (ARG3 == 0) \
1353    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
1354  else \
1355    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
1356  __RES; \
1357 })
1358
1359__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
1360{
1361 int32_t result;
1362
1363 __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r"  (op1), "r" (op2), "r" (op3) );
1364 return(result);
1365}
1366
1367#endif /* (__CORTEX_M >= 0x04) */
1368/*@} end of group CMSIS_SIMD_intrinsics */
1369
1370
1371#if defined ( __GNUC__ )
1372#pragma GCC diagnostic pop
1373#endif
1374
1375#endif /* __CMSIS_GCC_H */