source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ 75a6584

4.11
Last change on this file since 75a6584 was 75a6584, checked in by Sebastian Huber <sebastian.huber@…>, on 09/23/16 at 04:52:33

score: Fix C/C++ compatibility issue

Only use CPU_Per_CPU_control if it contains at least one field. In GNU
C empty structures have a size of zero. In C++ structures have a
non-zero size. In case CPU_PER_CPU_CONTROL_SIZE is defined to zero,
then this structure is not used anymore.

Close #2789.

  • Property mode set to 100644
File size: 16.4 KB
Line 
1/**
2 * @file
3 *
4 * @brief ARM Architecture Support API
5 */
6
7/*
8 *  This include file contains information pertaining to the ARM
9 *  processor.
10 *
11 *  Copyright (c) 2009-2015 embedded brains GmbH.
12 *
13 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
14 *
15 *  Copyright (c) 2006 OAR Corporation
16 *
17 *  Copyright (c) 2002 Advent Networks, Inc.
18 *        Jay Monkman <jmonkman@adventnetworks.com>
19 *
20 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
21 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
22 *
23 *  The license and distribution terms for this file may be
24 *  found in the file LICENSE in this distribution or at
25 *  http://www.rtems.org/license/LICENSE.
26 *
27 */
28
29#ifndef _RTEMS_SCORE_CPU_H
30#define _RTEMS_SCORE_CPU_H
31
32#include <rtems/score/types.h>
33#include <rtems/score/arm.h>
34
35#if defined(ARM_MULTILIB_ARCH_V4)
36
37/**
38 * @defgroup ScoreCPUARM ARM Specific Support
39 *
40 * @ingroup ScoreCPU
41 *
42 * @brief ARM specific support.
43 */
44/**@{**/
45
#if defined(__thumb__) && !defined(__thumb2__)
  /*
   * Thumb-1 cannot encode the system instructions (mrs/msr) used in the
   * inline assembly below, so the code must temporarily switch to ARM state
   * and back.  A scratch register is needed for the return branch.
   */
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  /* "bx pc" from Thumb state enters the following, word-aligned ARM code. */
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  /* Bit 0 of the branch target is set ("pc + 1") to re-enter Thumb state. */
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  /* Operand bindings for the scratch register used by the switch sequences. */
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  /* ARM or Thumb-2 state: no state switch is necessary, expand to nothing. */
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
59
/**
 * @name Program Status Register
 *
 * Bit definitions for the ARM CPSR/SPSR.  Unsigned constants are used since
 * a signed "1 << 31" shifts into the sign bit of an int, which is undefined
 * behavior in C.
 */
/**@{**/

#define ARM_PSR_N (1U << 31)  /* negative condition flag */
#define ARM_PSR_Z (1U << 30)  /* zero condition flag */
#define ARM_PSR_C (1U << 29)  /* carry condition flag */
#define ARM_PSR_V (1U << 28)  /* overflow condition flag */
#define ARM_PSR_Q (1U << 27)  /* saturation (sticky) flag */
#define ARM_PSR_J (1U << 24)  /* Jazelle state bit */
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xfU << ARM_PSR_GE_SHIFT)  /* SIMD greater-or-equal flags */
#define ARM_PSR_E (1U << 9)   /* data endianness bit */
#define ARM_PSR_A (1U << 8)   /* asynchronous abort mask */
#define ARM_PSR_I (1U << 7)   /* IRQ mask */
#define ARM_PSR_F (1U << 6)   /* FIQ mask */
#define ARM_PSR_T (1U << 5)   /* Thumb state bit */
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1fU << ARM_PSR_M_SHIFT)  /* processor mode field */
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

/** @} */

/** @} */
91
92#endif /* defined(ARM_MULTILIB_ARCH_V4) */
93
94/**
95 * @addtogroup ScoreCPU
96 */
97/**@{**/
98
/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

/* Derive the byte order from the compiler-provided ARM endianness macros. */
#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif
115
/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/* No port-provided software interrupt stack. */
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

/* No hardware-managed interrupt stack. */
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 * No separate floating-point context management by the score on this port
 * (see Context_Control_fp below); VFP registers, if present, are part of the
 * integer context.
 */
#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

/* Provide an idle body only when the WFI instruction is available. */
#if defined(ARM_MULTILIB_HAS_WFI)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32? */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC TRUE

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ). If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* Minimum task stack size: 4 KiB. */
#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there's a CLZ function which could be
 *     used to implement much quicker than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

/* This port has no CPU-specific per-CPU state (see ticket #2789). */
#define CPU_PER_CPU_CONTROL_SIZE 0
206
207/** @} */
208
/*
 * Byte offsets of fields within Context_Control (defined below), for use by
 * assembly code.  They must match the structure layout exactly.
 */

/* After the eleven uint32_t registers (cpsr, r4..r10, fp, sp, lr): 11 * 4. */
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
#endif

/* After the thread identifier: 44 + 4. */
#ifdef ARM_MULTILIB_VFP
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef RTEMS_SMP
  #ifdef ARM_MULTILIB_VFP
    /* After d8..d15: 48 + 8 * 8. */
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #else
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #endif
#endif

/*
 * CPU_Exception_frame layout constants (see the structure definition below):
 * r0..r12 occupy offsets 0..48, so sp is at 52 and the VFP context pointer
 * follows lr, pc, cpsr/xpsr and vector at offset 72.
 */
#define ARM_EXCEPTION_FRAME_SIZE 80

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

/* ARM_VFP_context: 2 * 4 bytes of status + 32 * 8 bytes of d-registers. */
#define ARM_VFP_CONTEXT_SIZE 264
232
233#ifndef ASM
234
235#ifdef __cplusplus
236extern "C" {
237#endif
238
239/**
240 * @addtogroup ScoreCPU
241 */
242/**@{**/
243
/**
 * @brief Thread register context saved across _CPU_Context_switch().
 *
 * The field layout must match the ARM_CONTEXT_CONTROL_*_OFFSET constants
 * above, which assembly code relies on.  Do not reorder fields.
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  /* Callee-saved state for the ARMv4 (classic ARM) variant. */
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  /* Callee-saved state for the Cortex-M (ARMv6-M/ARMv7-M) variants. */
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  /* Software thread identifier (thread ID register value). */
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP
  /* Callee-saved VFP double-precision registers d8..d15. */
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  /* True while a processor executes in this context (SMP hand-off flag). */
  volatile bool is_executing;
#endif
} Context_Control;
289
/**
 * @brief Floating-point context placeholder.
 *
 * This port performs no separate floating-point context management; VFP
 * registers, if any, live in Context_Control.  The structure is empty.
 */
typedef struct {
  /* Not supported */
} Context_Control_fp;

/* Defined in the ARM port support code; not visible in this header. */
extern uint32_t arm_cpu_mode;
295
/**
 * @brief Data memory barrier.
 *
 * Emits a DMB instruction when the target provides barrier instructions,
 * otherwise falls back to a compiler-only memory barrier.
 */
static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
304
/**
 * @brief Data synchronization barrier.
 *
 * Emits a DSB instruction when the target provides barrier instructions,
 * otherwise falls back to a compiler-only memory barrier.
 */
static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
313
/**
 * @brief Instruction synchronization barrier.
 *
 * Emits an ISB instruction when the target provides barrier instructions,
 * otherwise falls back to a compiler-only memory barrier.
 */
static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
322
/**
 * @brief Disables normal interrupts (IRQ) and returns the previous level.
 *
 * ARMv4: sets the I bit (0x80, see ARM_PSR_I) in the CPSR and returns the
 * old CPSR.  ARMv7-M: raises BASEPRI to 0x80 via basepri_max (which only
 * ever increases the masking priority) and returns the old BASEPRI.
 * Other variants: returns 0 without masking anything.
 *
 * @return The previous interrupt level for arm_interrupt_enable().
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  level = 0;
#endif

  return level;
}
353
/**
 * @brief Restores the interrupt level saved by arm_interrupt_disable().
 *
 * ARMv4: writes @a level back to the CPSR.  ARMv7-M: writes @a level back
 * to BASEPRI.  Other variants: no effect.
 *
 * @param[in] level The value previously returned by arm_interrupt_disable().
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
374
/**
 * @brief Briefly restores the given interrupt level, then masks again.
 *
 * Re-enables interrupts at @a level for an instant (allowing pending
 * interrupts to be serviced) and immediately returns to the current masked
 * state.  ARMv4 operates on the CPSR, ARMv7-M on BASEPRI; other variants do
 * nothing.
 *
 * @param[in] level The value previously returned by arm_interrupt_disable().
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
401
/* Score ISR primitives, mapped onto the arm_interrupt_* inlines above. */

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

/* Sets the current interrupt level (implemented in the port). */
void _CPU_ISR_Set_level( uint32_t level );

/* Returns the current interrupt level (implemented in the port). */
uint32_t _CPU_ISR_Get_level( void );

/**
 * @brief Initializes a thread context for its first dispatch.
 *
 * @param[out] the_context Context to initialize.
 * @param[in] stack_area_begin Base address of the thread stack.
 * @param[in] stack_area_size Size of the thread stack in bytes.
 * @param[in] new_level Initial interrupt level of the thread.
 * @param[in] entry_point Function the thread starts executing.
 * @param[in] is_fp True if the thread uses the floating-point unit.
 * @param[in] tls_area Thread-local storage area or NULL.
 */
void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

/* Yields the saved stack pointer of a context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp
429
#ifdef RTEMS_SMP
  /* Reads the SMP hand-off flag stored in the thread context. */
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    bool executing = context->is_executing;

    return executing;
  }

  /* Writes the SMP hand-off flag stored in the thread context. */
  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif
446
/*
 * Restarts the calling thread by restoring its initial context.  No trailing
 * semicolon in the expansion: the caller supplies it, and a stray ";" would
 * break use in an unbraced if/else.
 */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )

/* Address of the FP context area at @a _offset bytes into @a _base. */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 * Initializes an FP context: @a _destination is a pointer to a pointer to
 * the context, which is set from the null FP context.
 */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

/*
 * Halts the system: disables interrupts, moves the error code to r0 and
 * spins forever.  The do/while wrapper carries no trailing semicolon so the
 * macro behaves like a single statement at the call site.
 */
#define _CPU_Fatal_halt( _source, _err )    \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     (void) _level;                         \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0)
470
/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

/* Installs @a new_handler for @a vector and returns the old handler. */
void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 * @brief CPU switch context.
 *
 * Saves the context of @a run and restores the context of @a heir.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

/* Restores @a new_context and does not return. */
void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

#if defined(ARM_MULTILIB_ARCH_V7M)
  /* Cortex-M variant of the initial multitasking start; does not return. */
  void _ARMV7M_Start_multitasking( Context_Control *heir )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif

/* Test-support hooks: clobber and validate volatile registers. */
void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );
499
#ifdef RTEMS_SMP
  /* SMP bring-up hooks implemented by the port/BSP. */
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  /**
   * @brief Returns the index of the current processor.
   *
   * Reads the low byte (Affinity Level 0) of the MPIDR.
   */
  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  /* Signals an event to other processors (SEV instruction). */
  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  /* Waits for an event from another processor (WFE instruction). */
  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  /* Makes prior stores visible (DSB), then wakes waiting processors. */
  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }

  /* Waits for an event, then orders subsequent loads (DMB). */
  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
#endif
546
547
/**
 * @brief Reverses the byte order of a 32-bit value.
 *
 * Thumb-2 uses the REV instruction, Thumb-1 a portable C byte shuffle, and
 * classic ARM a three-instruction EOR/BIC/ROR sequence that needs no byte
 * masks.
 *
 * @param[in] value The value to byte-swap.
 *
 * @return The byte-swapped value.
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  /* Reassemble with the byte order reversed. */
  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
578
/**
 * @brief Reverses the byte order of a 16-bit value.
 *
 * Thumb-2 uses the REV16 instruction; all other variants swap the two bytes
 * in plain C.
 *
 * @param[in] value The value to byte-swap.
 *
 * @return The byte-swapped value.
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  uint16_t low_byte = value & 0xffU;
  uint16_t high_byte = (value >> 8) & 0xffU;

  return (uint16_t) ((low_byte << 8) | high_byte);
#endif
}
592
/* Free-running CPU counter value; wraps at 32 bits. */
typedef uint32_t CPU_Counter_ticks;

/* Reads the current CPU counter value (implemented in the port/BSP). */
CPU_Counter_ticks _CPU_Counter_read( void );

/* Returns the difference between two counter values, @a second - @a first. */
CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
);

#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  /* Idle thread body provided when WFI is available (see above). */
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif
605
606/** @} */
607
608/**
609 * @addtogroup ScoreCPUARM
610 */
611/**@{**/
612
613#if defined(ARM_MULTILIB_ARCH_V4)
614
/**
 * @brief Symbolic names for the ARM exception vectors.
 *
 * The enumerator values follow the hardware vector table order and must not
 * be changed.
 */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  /* Forces the enum to be 32 bits wide for layout stability. */
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;
627
628#endif /* defined(ARM_MULTILIB_ARCH_V4) */
629
/**
 * @brief Complete VFP register state saved for exception handling.
 *
 * Two 32-bit status registers followed by all 32 double-precision registers;
 * the total size must equal ARM_VFP_CONTEXT_SIZE (264 bytes).
 */
typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;
666
/**
 * @brief Register state captured when an exception is raised.
 *
 * The layout must match the ARM_EXCEPTION_FRAME_* constants above: r0..r12
 * at offsets 0..48, sp at 52 and the VFP context pointer at 72.
 */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  /* NULL when no VFP state was saved for this exception. */
  const ARM_VFP_context *vfp_context;
  /* Padding so the frame size meets the stack alignment requirement. */
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;

typedef CPU_Exception_frame CPU_Interrupt_frame;

/* Prints an exception frame, e.g. for fatal error reporting. */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/* Default handler invoked for otherwise unhandled exceptions. */
void _ARM_Exception_default( CPU_Exception_frame *frame );

/*
 * FIXME: In case your BSP uses this function, then convert it to use
 * the shared start.S file for ARM.
 */
void rtems_exception_init_mngt( void );
706
707/** @} */
708
709#ifdef __cplusplus
710}
711#endif
712
713#endif /* ASM */
714
715#endif /* _RTEMS_SCORE_CPU_H */
Note: See TracBrowser for help on using the repository browser.