source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ bd0d585

Last change on this file was bd0d585, checked in by Sebastian Huber <sebastian.huber@…>, on 11/24/16 at 10:53:59

arm: Fix _CPU_ISR_Is_enabled() for ARMv7-M

Update #2811.

/**
 * @file
 *
 * @brief ARM Architecture Support API
 */

/*
 *  This include file contains information pertaining to the ARM
 *  processor.
 *
 *  Copyright (c) 2009, 2016 embedded brains GmbH
 *
 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 *  Copyright (c) 2006 OAR Corporation
 *
 *  Copyright (c) 2002 Advent Networks, Inc.
 *        Jay Monkman <jmonkman@adventnetworks.com>
 *
 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/arm.h>

#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @defgroup ScoreCPUARM ARM Specific Support
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM specific support.
 */
/**@{**/

#if defined(__thumb__) && !defined(__thumb2__)
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
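
/*
 * Usage sketch (mirrors arm_interrupt_enable() further down in this file):
 * an inline assembly block which must execute in ARM state declares the
 * scratch register, switches from Thumb to ARM state before its
 * instructions, and switches back afterwards.  On plain ARM or Thumb-2
 * builds all of these macros expand to nothing.
 *
 * @code
 * ARM_SWITCH_REGISTERS;
 *
 * __asm__ volatile (
 *   ARM_SWITCH_TO_ARM
 *   "msr cpsr, %[level]\n"
 *   ARM_SWITCH_BACK
 *   : ARM_SWITCH_OUTPUT
 *   : [level] "r" (level)
 * );
 * @endcode
 */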

/**
 * @name Program Status Register
 */
/**@{**/

#define ARM_PSR_N (1 << 31)
#define ARM_PSR_Z (1 << 30)
#define ARM_PSR_C (1 << 29)
#define ARM_PSR_V (1 << 28)
#define ARM_PSR_Q (1 << 27)
#define ARM_PSR_J (1 << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1 << 9)
#define ARM_PSR_A (1 << 8)
#define ARM_PSR_I (1 << 7)
#define ARM_PSR_F (1 << 6)
#define ARM_PSR_T (1 << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_HYP 0x1a
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

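/*
 * Illustrative use of the mode field definitions (an editorial sketch, not
 * part of the original header): extract the mode bits from a CPSR value and
 * test for Supervisor mode.
 *
 * @code
 * uint32_t cpsr;
 * bool     is_svc;
 *
 * __asm__ volatile ( "mrs %0, cpsr" : "=r" (cpsr) );
 * is_svc = ( cpsr & ARM_PSR_M_MASK ) == ARM_PSR_M_SVC;
 * @endcode
 */
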
/** @} */

/** @} */

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

/**
 * @addtogroup ScoreCPU
 */
/**@{**/

#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#if defined(ARM_MULTILIB_ARCH_V7M)
  #define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE
#else
  #define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE
#endif

#if defined(ARM_MULTILIB_HAS_WFI)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

#define CPU_STACK_GROWS_UP FALSE

#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) in a way that lets them do
 * something useful, we have to disable the operating system support for
 * FIQs.  Having operating system support for them would require that FIQs
 * are disabled during critical sections of the operating system and
 * application.  At this level IRQs and FIQs would be equal.  It is true
 * that FIQs could interrupt the non-critical sections of IRQs, so here they
 * would have a small advantage.  Without operating system support, the FIQs
 * can execute at any time (of course not during the service of another
 * FIQ).  If someone needs operating system support for a FIQ, she can
 * trigger a software interrupt and service the request in a two-step
 * process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x1
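
/*
 * Illustrative two-step pattern (an editorial sketch; all names are
 * hypothetical and the software interrupt trigger is BSP-specific): the FIQ
 * handler itself uses no operating system services.  It only records the
 * request and triggers a normal interrupt, whose handler may then use the
 * full operating system API.
 *
 * @code
 * volatile uint32_t fiq_request_count;
 *
 * void my_fiq_handler( void )
 * {
 *   ++fiq_request_count;
 *   my_bsp_trigger_software_irq();
 * }
 *
 * void my_irq_handler( void *arg )
 * {
 *   my_service_fiq_requests();
 * }
 * @endcode
 */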

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here.  Since we do not (*), we
 * just use the universal macros.
 *
 * (*) On ARMv5 and later, there is a CLZ instruction which could be
 *     used to implement this much more quickly than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
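
/*
 * Illustrative CLZ-based alternative (an editorial sketch, assuming a GCC
 * tool chain): the __builtin_clz() built-in maps to the CLZ instruction on
 * ARMv5 and later and yields the number of leading zero bits, so the index
 * of the most significant set bit of a nonzero word is:
 *
 * @code
 * static inline unsigned int arm_find_last_set( uint32_t value )
 * {
 *   return 31U - (unsigned int) __builtin_clz( value );
 * }
 * @endcode
 */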

#define CPU_MAXIMUM_PROCESSORS 32

/** @} */

#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
#endif

#ifdef ARM_MULTILIB_VFP
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef ARM_MULTILIB_ARCH_V4
  #if defined(ARM_MULTILIB_VFP)
    #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 112
  #elif defined(ARM_MULTILIB_HAS_THREAD_ID_REGISTER)
    #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 48
  #else
    #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 44
  #endif
#endif

#ifdef RTEMS_SMP
  #ifdef ARM_MULTILIB_VFP
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 116
  #else
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 52
  #endif
#endif

#define ARM_EXCEPTION_FRAME_SIZE 80

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

#define ARM_VFP_CONTEXT_SIZE 264

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreCPU
 */
/**@{**/

typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef ARM_MULTILIB_ARCH_V4
  uint32_t isr_dispatch_disable;
#endif
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
} Context_Control;
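
/*
 * Consistency note (an editorial sketch, assuming RTEMS_STATIC_ASSERT()
 * from <rtems/score/basedefs.h> and offsetof() from <stddef.h>): the
 * assembler offsets defined earlier must match this C layout, which can be
 * checked at compile time, for example:
 *
 * @code
 * #ifdef ARM_MULTILIB_VFP
 * RTEMS_STATIC_ASSERT(
 *   offsetof( Context_Control, register_d8 )
 *     == ARM_CONTEXT_CONTROL_D8_OFFSET,
 *   ARM_CONTEXT_CONTROL_D8_OFFSET
 * );
 * #endif
 * @endcode
 */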

typedef struct {
  /* Not supported */
} Context_Control_fp;

extern uint32_t arm_cpu_mode;

static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
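
/*
 * Usage sketch (an editorial sketch; payload and payload_ready are
 * hypothetical): publish data to an interrupt handler or another processor
 * with a data memory barrier between the payload write and the flag write.
 * The reader needs a matching barrier between reading the flag and reading
 * the payload.
 *
 * @code
 * extern volatile uint32_t payload;
 * extern volatile bool     payload_ready;
 *
 * payload = 123;
 * _ARM_Data_memory_barrier();
 * payload_ready = true;
 * @endcode
 */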

static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#endif

  return level;
}

static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}

static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )
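
/*
 * Usage sketch (an editorial sketch; the do_*() helpers are hypothetical):
 * a classic interrupt critical section, with _CPU_ISR_Flash() opening a
 * brief window in the middle so that pending interrupts can be serviced.
 *
 * @code
 * uint32_t level;
 *
 * _CPU_ISR_Disable( level );
 * do_first_part_of_critical_section();
 * _CPU_ISR_Flash( level );
 * do_second_part_of_critical_section();
 * _CPU_ISR_Enable( level );
 * @endcode
 */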

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  return ( level & 0x80 ) == 0;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  return level == 0;
#endif
}
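
/*
 * Editorial note: the level is the saved CPSR on ARMv4, where a set I bit
 * (0x80) masks IRQs, and the saved BASEPRI on ARMv7-M, where interrupts are
 * fully enabled only if BASEPRI is zero.  The ARMv7-M check above is the
 * subject of commit bd0d585 ("arm: Fix _CPU_ISR_Is_enabled() for ARMv7-M")
 * shown at the top of this page.
 */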

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

#define _CPU_Fatal_halt( _source, _err )    \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     (void) _level;                         \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0)

/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 * @brief CPU switch context.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_NO_RETURN;

#if defined(ARM_MULTILIB_ARCH_V7M)
  void _ARMV7M_Start_multitasking( Context_Control *heir )
    RTEMS_NO_RETURN;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif

void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
#endif
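
/*
 * Usage sketch (an editorial sketch; start_flag is hypothetical): a
 * processor can sleep in WFE until another processor signals progress.
 *
 * @code
 * extern volatile bool start_flag;
 *
 * while ( !start_flag ) {
 *   _CPU_SMP_Processor_event_receive();
 * }
 * @endcode
 *
 * The signalling processor sets start_flag to true and then calls
 * _CPU_SMP_Processor_event_broadcast(), whose DSB makes the store visible
 * before the SEV wakes the waiters.
 */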

static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
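
/*
 * Worked example (editorial) for the ARM-mode variant above with
 * value = 0x12345678:
 *
 *   tmp   = value EOR (value ROR 16)  = 0x12345678 ^ 0x56781234 = 0x444C444C
 *   tmp   = tmp BIC #0xff0000         = 0x4400444C
 *   value = value ROR 8               = 0x78123456
 *   value = value EOR (tmp LSR 8)     = 0x78123456 ^ 0x00440044 = 0x78563412
 *
 * which is 0x12345678 with its bytes reversed, in four instructions and one
 * scratch register.
 */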

static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}

typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
);

#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif

/** @} */

/**
 * @addtogroup ScoreCPUARM
 */
/**@{**/

#if defined(ARM_MULTILIB_ARCH_V4)

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;

typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  const ARM_VFP_context *vfp_context;
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

void _ARM_Exception_default( CPU_Exception_frame *frame );

/*
 * FIXME: If your BSP uses this function, convert it to use
 * the shared start.S file for ARM.
 */
void rtems_exception_init_mngt( void );

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */