source: rtems/cpukit/score/cpu/arm/include/rtems/score/cpu.h @ 4c89fbcd

/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @brief ARM Architecture Support API
 */

/*
 *  This include file contains information pertaining to the ARM
 *  processor.
 *
 *  Copyright (c) 2009, 2017 embedded brains GmbH
 *
 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 *  Copyright (c) 2006 On-Line Applications Research Corporation (OAR)
 *
 *  Copyright (c) 2002 Advent Networks, Inc.
 *        Jay Monkman <jmonkman@adventnetworks.com>
 *
 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */
47
48#ifndef _RTEMS_SCORE_CPU_H
49#define _RTEMS_SCORE_CPU_H
50
51#include <rtems/score/basedefs.h>
52#if defined(RTEMS_PARAVIRT)
53#include <rtems/score/paravirt.h>
54#endif
55#include <rtems/score/arm.h>
56
57/**
58 * @addtogroup RTEMSScoreCPUARM
59 *
60 * @{
61 */
62
63#if defined(ARM_MULTILIB_ARCH_V4)
64
65#if defined(__thumb__) && !defined(__thumb2__)
66  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
67  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
68  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
69  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
70  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
71#else
72  #define ARM_SWITCH_REGISTERS
73  #define ARM_SWITCH_TO_ARM
74  #define ARM_SWITCH_BACK
75  #define ARM_SWITCH_OUTPUT
76  #define ARM_SWITCH_ADDITIONAL_OUTPUT
77#endif
78
79/**
80 * @name Program Status Register
81 */
82/**@{**/
83
84#define ARM_PSR_N (1 << 31)
85#define ARM_PSR_Z (1 << 30)
86#define ARM_PSR_C (1 << 29)
87#define ARM_PSR_V (1 << 28)
88#define ARM_PSR_Q (1 << 27)
89#define ARM_PSR_J (1 << 24)
90#define ARM_PSR_GE_SHIFT 16
91#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
92#define ARM_PSR_E (1 << 9)
93#define ARM_PSR_A (1 << 8)
94#define ARM_PSR_I (1 << 7)
95#define ARM_PSR_F (1 << 6)
96#define ARM_PSR_T (1 << 5)
97#define ARM_PSR_M_SHIFT 0
98#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
99#define ARM_PSR_M_USR 0x10
100#define ARM_PSR_M_FIQ 0x11
101#define ARM_PSR_M_IRQ 0x12
102#define ARM_PSR_M_SVC 0x13
103#define ARM_PSR_M_ABT 0x17
104#define ARM_PSR_M_HYP 0x1a
105#define ARM_PSR_M_UND 0x1b
106#define ARM_PSR_M_SYS 0x1f
107
108/** @} */
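
/*
 * For example, the mode field and the IRQ mask bit can be decoded from a
 * CPSR value with the definitions above (an illustrative sketch, not used
 * elsewhere in this header):
 *
 *   uint32_t cpsr;
 *
 *   __asm__ volatile ( "mrs %0, cpsr" : "=r" (cpsr) );
 *
 *   if ( ( ( cpsr & ARM_PSR_M_MASK ) >> ARM_PSR_M_SHIFT ) == ARM_PSR_M_SVC ) {
 *     // the processor executes in Supervisor mode
 *   }
 *
 *   bool irq_masked = ( cpsr & ARM_PSR_I ) != 0;
 */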

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE

#define CPU_STACK_GROWS_UP FALSE

#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here.  Since we don't (*), we'll
 * just use the generic macros.
 *
 * (*) On ARMv5 and later, there is a CLZ instruction which could be
 *     used to implement this much more quickly than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
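
/*
 * A sketch of the CLZ-based variant mentioned above (illustrative only and
 * assuming a GCC-style compiler; the generic bitfield code does not do
 * this):
 *
 *   static inline unsigned int arm_find_most_significant_bit( uint32_t value )
 *   {
 *     // __builtin_clz() compiles to a single CLZ instruction on ARMv5 and
 *     // later; the result is undefined for value == 0
 *     return 31U - (unsigned int) __builtin_clz( value );
 *   }
 */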

#define CPU_USE_LIBC_INIT_FINI_ARRAY TRUE

#define CPU_MAXIMUM_PROCESSORS 32

#define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44

#ifdef ARM_MULTILIB_VFP
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef ARM_MULTILIB_ARCH_V4
  #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 40
#endif

#ifdef RTEMS_SMP
  #if defined(ARM_MULTILIB_VFP)
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #else
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #endif
#endif

#define ARM_EXCEPTION_FRAME_SIZE 80

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

#define ARM_VFP_CONTEXT_SIZE 264

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
  uint32_t isr_dispatch_disable;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
  uint32_t thread_id;
#ifdef ARM_MULTILIB_VFP
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
} Context_Control;
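
/*
 * The ARM_CONTEXT_CONTROL_*_OFFSET constants above describe the layout of
 * Context_Control for use by assembly language code.  A build-time check
 * along the following lines would catch a layout mismatch (a sketch; it
 * assumes <stddef.h> for offsetof() and the RTEMS_STATIC_ASSERT() macro
 * from <rtems/score/basedefs.h>):
 *
 *   RTEMS_STATIC_ASSERT(
 *     offsetof( Context_Control, thread_id )
 *       == ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET,
 *     ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET
 *   );
 */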

static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
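
/*
 * For example, a data memory barrier orders the store of a payload before
 * the store which publishes it to another processor (an illustrative
 * sketch; "payload" and "flag" are hypothetical shared variables):
 *
 *   payload = 42;
 *   _ARM_Data_memory_barrier();   // payload is observable before flag
 *   flag = 1;
 */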

#if defined(ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE)
uint32_t arm_interrupt_disable( void );
void arm_interrupt_enable( uint32_t level );
void arm_interrupt_flash( uint32_t level );
#else
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  /*
   * Disable only normal interrupts (IRQ).
   *
   * To keep fast interrupts (FIQ) fast enough to be useful, the operating
   * system provides no support for them.  Operating system support would
   * require that FIQs are disabled during critical sections of the
   * operating system and the application, which would put IRQs and FIQs on
   * an equal footing.  FIQs could still interrupt the non-critical sections
   * of IRQ handlers, so they would retain a small advantage.  Without
   * operating system support, FIQs can execute at any time, except during
   * the service of another FIQ.  If a FIQ handler needs operating system
   * services, it can trigger a software interrupt and have the request
   * serviced in a two-step process.
   */
#if __ARM_ARCH >= 7
  __asm__ volatile (
    "mrs %0, cpsr\n"
    "cpsid i\n"
    "isb"
    : "=&r" (level)
  );
#else
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#endif
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#endif

  return level;
}

static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}

static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
#endif /* !ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE */

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie ) \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )
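
/*
 * For example, these macros are used in the classic disable/flash/enable
 * pattern for a critical section (illustrative):
 *
 *   uint32_t level;
 *
 *   _CPU_ISR_Disable( level );
 *   // first part of the critical section
 *   _CPU_ISR_Flash( level );   // briefly let pending interrupts through
 *   // second part of the critical section
 *   _CPU_ISR_Enable( level );
 */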

static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  return ( level & 0x80 ) == 0;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  return level == 0;
#endif
}

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }

  RTEMS_NO_RETURN void _ARM_Start_multitasking( Context_Control *heir );

  #define _CPU_Start_multitasking( _heir ) _ARM_Start_multitasking( _heir )
#endif

#define _CPU_Context_Restart_self( _the_context ) \
  _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

typedef void ( *CPU_ISR_handler )( void );

void _CPU_ISR_install_vector(
  uint32_t         vector,
  CPU_ISR_handler  new_handler,
  CPU_ISR_handler *old_handler
);

/**
 * @brief CPU switch context.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
  Context_Control *executing,
  Context_Control *heir
);

RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );

#if defined(ARM_MULTILIB_ARCH_V7M)
  RTEMS_NO_RETURN void _ARMV7M_Start_multitasking( Context_Control *heir );
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }
#endif
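
/*
 * For example, _ARM_Wait_for_event() and _ARM_Send_event() pair up in a
 * low-power spin-wait (an illustrative sketch; "ready" is a hypothetical
 * volatile flag set by another processor before it executes SEV):
 *
 *   while ( !ready ) {
 *     _ARM_Wait_for_event();   // sleep until an event such as SEV arrives
 *   }
 */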

static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}
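
/*
 * For example, the swap routines reverse the byte order of their argument,
 * e.g. for endian conversion (illustrative):
 *
 *   uint32_t swapped_word = CPU_swap_u32( 0x12345678 );  // 0x78563412
 *   uint16_t swapped_half = CPU_swap_u16( 0x1234 );      // 0x3412
 */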

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#if defined(ARM_MULTILIB_ARCH_V4)

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;

typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  const ARM_VFP_context *vfp_context;
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

void _ARM_Exception_default( CPU_Exception_frame *frame );

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#ifdef __cplusplus
}
#endif

#endif /* ASM */

/** @} */

#endif /* _RTEMS_SCORE_CPU_H */