source: rtems/cpukit/score/cpu/arm/include/rtems/score/cpu.h @ 2afb22b

5
Last change on this file since 2afb22b was 2afb22b, checked in by Chris Johns <chrisj@…>, on 12/23/17 at 07:18:56

Remove make preinstall

A speciality of the RTEMS build system was the make preinstall step. It
copied header files from arbitrary locations into the build tree. The
header files were included via the -Bsome/build/tree/path GCC command
line option.

This has at least seven problems:

  • The make preinstall step itself needs time and disk space.
  • Errors in header files show up in the build tree copy. This makes it hard for editors to open the right file to fix the error.
  • There is no clear relationship between source and build tree header files. This makes an audit of the build process difficult.
  • The visibility of all header files in the build tree makes it difficult to enforce API barriers. For example it is discouraged to use BSP-specifics in the cpukit.
  • An introduction of a new build system is difficult.
  • Include paths specified by the -B option are system headers. This may suppress warnings.
  • The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step. All installed header
files are moved to dedicated include directories in the source tree.
Let @RTEMS_CPU@ be the target architecture, e.g. arm, powerpc, sparc,
etc. Let @RTEMS_BSP_FAMILY@ be a BSP family base directory, e.g.
erc32, imx, qoriq, etc.

The new cpukit include directories are:

  • cpukit/include
  • cpukit/score/cpu/@RTEMS_CPU@/include
  • cpukit/libnetworking

The new BSP include directories are:

  • bsps/include
  • bsps/@RTEMS_CPU@/include
  • bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header file, e.g.
it is not possible to override general header files via the include path
order.

The "bootstrap -p" option was removed. The new "bootstrap -H" option
should be used to regenerate the "headers.am" files.

Update #3254.

  • Property mode set to 100644
File size: 16.1 KB
Line 
1/**
2 * @file
3 *
4 * @brief ARM Architecture Support API
5 */
6
7/*
8 *  This include file contains information pertaining to the ARM
9 *  processor.
10 *
11 *  Copyright (c) 2009, 2017 embedded brains GmbH
12 *
13 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
14 *
15 *  Copyright (c) 2006 OAR Corporation
16 *
17 *  Copyright (c) 2002 Advent Networks, Inc.
18 *        Jay Monkman <jmonkman@adventnetworks.com>
19 *
20 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
21 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
22 *
23 *  The license and distribution terms for this file may be
24 *  found in the file LICENSE in this distribution or at
25 *  http://www.rtems.org/license/LICENSE.
26 *
27 */
28
29#ifndef _RTEMS_SCORE_CPU_H
30#define _RTEMS_SCORE_CPU_H
31
32#include <rtems/score/types.h>
33#include <rtems/score/arm.h>
34
#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @defgroup ScoreCPUARM ARM Specific Support
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM specific support.
 */
/**@{**/

/*
 * Helpers used inside the inline assembly below when compiling in Thumb-1
 * mode: Thumb-1 cannot encode the system instructions used here (mrs/msr),
 * so the asm fragments temporarily switch the core to ARM state and back.
 * In ARM or Thumb-2 mode all of these expand to nothing.
 */
#if defined(__thumb__) && !defined(__thumb2__)
  /* Scratch register needed by ARM_SWITCH_BACK to return to Thumb state */
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  /* Standard Thumb->ARM switch idiom: align, "bx pc" (pc reads ahead),
     then continue assembling as ARM code */
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  /* "pc + 1" sets the Thumb bit, so "bx" returns the core to Thumb state */
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  /* Output operand binding for the scratch register */
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  /* Same binding, preceded by a comma, for asm statements that already
     have other output operands */
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
59
/**
 * @name Program Status Register
 *
 * Bit masks and field values for the ARM CPSR/SPSR.
 *
 * Note: ARM_PSR_N is written as a hexadecimal literal instead of
 * (1 << 31) because left-shifting a signed 1 into the sign bit is
 * undefined behavior in C.  A plain hex literal (which has unsigned int
 * type in C) is used rather than a "U"-suffixed shift so that these
 * definitions remain usable from assembly sources as well.
 */
/**@{**/

#define ARM_PSR_N 0x80000000      /* negative condition flag (bit 31) */
#define ARM_PSR_Z (1 << 30)       /* zero condition flag */
#define ARM_PSR_C (1 << 29)       /* carry condition flag */
#define ARM_PSR_V (1 << 28)       /* overflow condition flag */
#define ARM_PSR_Q (1 << 27)       /* saturation (sticky overflow) flag */
#define ARM_PSR_J (1 << 24)       /* Jazelle state bit */
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)  /* SIMD greater-or-equal flags */
#define ARM_PSR_E (1 << 9)        /* endianness state bit */
#define ARM_PSR_A (1 << 8)        /* asynchronous abort disable */
#define ARM_PSR_I (1 << 7)        /* IRQ disable */
#define ARM_PSR_F (1 << 6)        /* FIQ disable */
#define ARM_PSR_T (1 << 5)        /* Thumb state bit */

/* Processor mode field (bits 4:0) */
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_HYP 0x1a
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

/** @} */
90
91/** @} */
92
93#endif /* defined(ARM_MULTILIB_ARCH_V4) */
94
/**
 * @addtogroup ScoreCPU
 */
/**@{**/

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/* This port does not manage dedicated software/hardware interrupt stacks */
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 * The separate floating point context mechanism is unused here: when VFP is
 * configured, the d8-d15 bank is saved as part of Context_Control instead
 * (see below).
 */
#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE

/* A custom idle thread body is provided only if the core has WFI */
#if defined(ARM_MULTILIB_HAS_WFI)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

/* ARM stacks grow from high to low addresses */
#define CPU_STACK_GROWS_UP FALSE

/* Maximum data cache line size among the supported multilib variants */
#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* Minimum task stack size: 4 KiB */
#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there's a CLZ function which could be
 *     used to implement much quicker than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_MAXIMUM_PROCESSORS 32

/** @} */
181
/*
 * Byte offsets of selected Context_Control members for use by assembly
 * code.  They must match the layout of Context_Control below: on ARMv4 the
 * integer part is r4-r10, fp, sp, lr, isr_dispatch_disable (11 * 4 = 44
 * bytes).
 */
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  /* thread_id immediately follows the 44-byte integer part */
  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
#endif

#ifdef ARM_MULTILIB_VFP
  /* register_d8 is 8-byte aligned after the integer part (44 -> 48) */
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef ARM_MULTILIB_ARCH_V4
  /* isr_dispatch_disable is the 11th 32-bit member (10 * 4 = 40) */
  #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 40
#endif

#ifdef RTEMS_SMP
  /*
   * is_executing is the last Context_Control member; its offset depends on
   * which optional members (VFP bank, thread id) are configured in.
   */
  #if defined(ARM_MULTILIB_VFP)
    /* 48 + 8 * 8 = 112 (after d8-d15) */
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #elif defined(ARM_MULTILIB_HAS_THREAD_ID_REGISTER)
    /* after thread_id */
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #else
    /* directly after the integer part */
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 44
  #endif
#endif

/*
 * Size and member offsets of CPU_Exception_frame (see below):
 * r0-r12 occupy the first 13 * 4 = 52 bytes, hence sp at 52; the
 * vfp_context pointer is at 72; total size including the alignment
 * reserve is 80 bytes.
 */
#define ARM_EXCEPTION_FRAME_SIZE 80

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

/* sizeof(ARM_VFP_context): 2 * 4 + 32 * 8 = 264 bytes */
#define ARM_VFP_CONTEXT_SIZE 264
211
212#ifndef ASM
213
214#ifdef __cplusplus
215extern "C" {
216#endif
217
218/**
219 * @addtogroup ScoreCPU
220 */
221/**@{**/
222
/**
 * @brief Per-thread register context saved across a context switch.
 *
 * Only callee-saved registers (per the AAPCS) and port-specific state are
 * stored here; volatile registers are preserved on the stack by the
 * compiler.  The ARM_CONTEXT_CONTROL_* offset constants above depend on
 * this exact layout.
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
  /* Per-thread ISR dispatch disable indication.  NOTE(review): maintained
     by the context switch / interrupt assembly code, which is not in this
     file — confirm exact semantics there. */
  uint32_t isr_dispatch_disable;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  /* Value for the thread ID register (e.g. TPIDRURO) of this thread */
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP
  /* Callee-saved VFP register bank d8-d15 */
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  /* True while this context's thread executes on a processor; accessed via
     the _CPU_Context_*_is_executing() helpers below */
  volatile bool is_executing;
#endif
} Context_Control;
268
/**
 * @brief Floating point context.
 *
 * Deliberately empty: this port does not use the separate floating point
 * context mechanism (CPU_HARDWARE_FP is FALSE); when VFP is configured the
 * relevant registers live in Context_Control instead.
 */
typedef struct {
  /* Not supported */
} Context_Control_fp;
272
/**
 * @brief Data Memory Barrier.
 *
 * Issues a DMB instruction when the multilib variant provides barrier
 * instructions; otherwise falls back to a compiler-only memory barrier.
 */
static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
281
/**
 * @brief Data Synchronization Barrier.
 *
 * Issues a DSB instruction when the multilib variant provides barrier
 * instructions; otherwise falls back to a compiler-only memory barrier.
 */
static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
290
/**
 * @brief Instruction Synchronization Barrier.
 *
 * Issues an ISB instruction when the multilib variant provides barrier
 * instructions; otherwise falls back to a compiler-only memory barrier.
 */
static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
299
/**
 * @brief Disables normal interrupts (IRQ) and returns the previous level.
 *
 * On ARMv4 this sets the I bit in the CPSR; on ARMv7-M it raises
 * BASEPRI_MAX to 0x80.  The returned value is suitable for passing to
 * arm_interrupt_enable().
 *
 * NOTE(review): there is no ARMv6-M branch here, so on such a
 * configuration `level` would be returned uninitialized — presumably
 * ARMv6-M builds use a different code path; confirm.
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /*
   * Disable only normal interrupts (IRQ).
   *
   * In order to support fast interrupts (FIQ) such that they can do something
   * useful, we have to disable the operating system support for FIQs.  Having
   * operating system support for them would require that FIQs are disabled
   * during critical sections of the operating system and application.  At this
   * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
   * the non critical sections of IRQs, so here they would have a small
   * advantage.  Without operating system support, the FIQs can execute at any
   * time (of course not during the service of another FIQ). If someone needs
   * operating system support for a FIQ, she can trigger a software interrupt and
   * service the request in a two-step process.
   */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n" /* 0x80 == ARM_PSR_I */
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  /* Read the old BASEPRI and raise the priority mask (BASEPRI_MAX only
     writes when it increases the masking level) */
  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#endif

  return level;
}
342
/**
 * @brief Restores the interrupt level previously returned by
 * arm_interrupt_disable().
 *
 * On ARMv4 the whole CPSR is written back; on ARMv7-M the saved BASEPRI
 * value is restored.
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
363
/**
 * @brief Momentarily restores the given interrupt level and then disables
 * interrupts again ("flash" interrupts).
 *
 * Briefly writes @a level to the CPSR (ARMv4) or BASEPRI (ARMv7-M), then
 * writes back the previously active value, giving pending interrupts a
 * window to be serviced.
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
390
/*
 * RTEMS interrupt disable/enable/flash primitives, mapped onto the inline
 * helpers above.  _isr_cookie receives/supplies the saved interrupt level.
 */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )
401
/**
 * @brief Tells whether the given saved interrupt level has interrupts
 * enabled.
 *
 * ARMv4: enabled iff the I bit (0x80, IRQ disable) of the saved CPSR is
 * clear.  ARMv7-M: enabled iff the saved BASEPRI is zero.
 */
RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  return ( level & 0x80 ) == 0;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  return level == 0;
#endif
}
410
/** @brief Sets the current interrupt level (definition not in this file). */
void _CPU_ISR_Set_level( uint32_t level );

/** @brief Returns the current interrupt level (definition not in this file). */
uint32_t _CPU_ISR_Get_level( void );

/**
 * @brief Initializes @a the_context so that the thread starts executing at
 * @a entry_point with the given stack.
 *
 * @param the_context context to initialize
 * @param stack_area_begin base address of the stack area
 * @param stack_area_size size of the stack area in bytes
 * @param new_level initial interrupt level of the thread
 * @param entry_point function the thread starts in
 * @param is_fp whether the thread is a floating point thread
 *   (NOTE(review): likely ignored since CPU_HARDWARE_FP is FALSE — confirm
 *   against the definition, which is not in this file)
 * @param tls_area thread-local storage area
 */
void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

/* Stack pointer stored in a saved context (e.g. for stack checking) */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp
427
#ifdef RTEMS_SMP
  /** @brief Returns whether the context's thread is executing on a CPU. */
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  /** @brief Marks whether the context's thread is executing on a CPU. */
  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif
444
/*
 * Restarts the currently executing thread by restoring its own context.
 * Note: no trailing semicolon in the expansion — the caller supplies it,
 * which keeps `if (x) _CPU_Context_Restart_self(c); else ...` valid.
 */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )

/* Initializes an FP context from the null FP context (empty on this port) */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

/*
 * Halts the system on a fatal error: disables interrupts, places the error
 * code in r0 and spins forever.  The do/while(0) wrapper has no trailing
 * semicolon so the macro behaves like a single statement at call sites
 * (the previous trailing semicolon defeated the idiom and broke if/else
 * usage).
 */
#define _CPU_Fatal_halt( _source, _err )    \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     (void) _level;                         \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0)
465
/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

/* Installs new_handler for the given vector and returns the previous
   handler in old_handler (definition not in this file) */
void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 * @brief CPU switch context.
 *
 * Saves the register context of @a run and resumes execution of @a heir
 * (implemented in assembly, not in this file).
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

/* Restores new_context and transfers control into it; never returns */
void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_NO_RETURN;
484
#if defined(ARM_MULTILIB_ARCH_V7M)
  /* ARMv7-M provides its own start-of-multitasking entry point */
  void _ARMV7M_Start_multitasking( Context_Control *heir )
    RTEMS_NO_RETURN;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif

/* NOTE(review): clobber/validate fill resp. check the volatile register
   set with a bit pattern — presumably used by the context validation
   tests; definitions are not in this file. */
void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );
494
#ifdef RTEMS_SMP
  /* SMP bring-up hooks; definitions are not in this file */
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  /**
   * @brief Returns the index of the current processor.
   *
   * Reads the low byte (Aff0) of the MPIDR via CP15.
   */
  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  /** @brief Executes SEV (signal event to other processors). */
  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  /** @brief Executes WFE (wait for an event). */
  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  /* Ensure prior stores are visible (DSB), then wake waiting processors */
  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }

  /* Wait for an event, then order subsequent loads after the wake-up */
  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
#endif
541
542
/**
 * @brief Byte-swaps a 32-bit value (endianness conversion).
 *
 * Thumb-2 uses the REV instruction, Thumb-1 uses portable C shifts and
 * masks, and ARM mode uses the classic three-instruction EOR/BIC/ROR
 * swap sequence.
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
573
/**
 * @brief Byte-swaps a 16-bit value (endianness conversion).
 *
 * Thumb-2 uses the REV16 instruction; all other variants swap the two
 * bytes with plain shifts and masks.
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  uint16_t low_byte = value & 0xffU;
  uint16_t high_byte = (value >> 8) & 0xffU;

  return (uint16_t) ((low_byte << 8) | high_byte);
#endif
}
587
/* Tick type of the free-running CPU counter */
typedef uint32_t CPU_Counter_ticks;

/* Reads the current CPU counter value (definition not in this file) */
CPU_Counter_ticks _CPU_Counter_read( void );

/* Returns the tick difference second - first (definition not in this
   file; presumably handles counter wrap-around — confirm there) */
CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
);

/* Custom idle body, only provided when WFI is available (see above) */
#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif
600
601/** @} */
602
603/**
604 * @addtogroup ScoreCPUARM
605 */
606/**@{**/
607
608#if defined(ARM_MULTILIB_ARCH_V4)
609
/**
 * @brief Symbolic names of the classic ARM (ARMv4) exception vectors, in
 * vector table order.
 */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  /* Forces the enumeration type to be at least 32 bits wide */
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;
622
623#endif /* defined(ARM_MULTILIB_ARCH_V4) */
624
/**
 * @brief Complete VFP register file (d0-d31 plus control registers).
 *
 * The layout (2 * 4 + 32 * 8 = 264 bytes) must match
 * ARM_VFP_CONTEXT_SIZE above.
 */
typedef struct {
  /* Floating-point exception control register (FPEXC) */
  uint32_t register_fpexc;
  /* Floating-point status and control register (FPSCR) */
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;
661
/**
 * @brief Register state captured when an exception is taken.
 *
 * The layout must match ARM_EXCEPTION_FRAME_SIZE (80 bytes) and the
 * ARM_EXCEPTION_FRAME_*_OFFSET constants above; it is consumed by
 * _CPU_Exception_frame_print() and _ARM_Exception_default().
 */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  /* Offset 52 == ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET */
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  /* Offset 72 == ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET; NOTE(review):
     presumably NULL when no VFP state was saved — confirm in the
     exception entry code */
  const ARM_VFP_context *vfp_context;
  /* Pads the frame to 80 bytes (multiple of CPU_STACK_ALIGNMENT) */
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;
689
/* Prints the exception frame contents (definition not in this file) */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/* Default ARM exception handler (definition not in this file) */
void _ARM_Exception_default( CPU_Exception_frame *frame );
693
694/** @} */
695
696#ifdef __cplusplus
697}
698#endif
699
700#endif /* ASM */
701
702#endif /* _RTEMS_SCORE_CPU_H */
Note: See TracBrowser for help on using the repository browser.