source: rtems/cpukit/score/cpu/arm/include/rtems/score/cpu.h @ c8df844

Last change on this file was c8df844, checked in by Sebastian Huber <sebastian.huber@…>, on 06/19/18 at 12:59:51

score: Add CPU_INTERRUPT_STACK_ALIGNMENT

Add CPU port define for the interrupt stack alignment. The alignment
should take the stack ABI and the cache line size into account.

Update #3459.

/**
 * @file
 *
 * @brief ARM Architecture Support API
 */

/*
 *  This include file contains information pertaining to the ARM
 *  processor.
 *
 *  Copyright (c) 2009, 2017 embedded brains GmbH
 *
 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 *  Copyright (c) 2006 OAR Corporation
 *
 *  Copyright (c) 2002 Advent Networks, Inc.
 *        Jay Monkman <jmonkman@adventnetworks.com>
 *
 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/basedefs.h>
#if defined(RTEMS_PARAVIRT)
#include <rtems/score/paravirt.h>
#endif
#include <rtems/score/arm.h>

#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @defgroup ScoreCPUARM ARM Specific Support
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM specific support.
 */
/**@{**/

#if defined(__thumb__) && !defined(__thumb2__)
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif

/**
 * @name Program Status Register
 */
/**@{**/

#define ARM_PSR_N (1 << 31)
#define ARM_PSR_Z (1 << 30)
#define ARM_PSR_C (1 << 29)
#define ARM_PSR_V (1 << 28)
#define ARM_PSR_Q (1 << 27)
#define ARM_PSR_J (1 << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1 << 9)
#define ARM_PSR_A (1 << 8)
#define ARM_PSR_I (1 << 7)
#define ARM_PSR_F (1 << 6)
#define ARM_PSR_T (1 << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_HYP 0x1a
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

/** @} */

/** @} */

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

/**
 * @addtogroup ScoreCPU
 */
/**@{**/

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE

#if defined(ARM_MULTILIB_HAS_WFI)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

#define CPU_STACK_GROWS_UP FALSE

#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here.  Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM v5 and later there is a CLZ instruction, which could be
 *     used to implement this much more quickly than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

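/*
 * Illustrative sketch only, not part of the original header: on ARMv5
 * and later the CLZ instruction is available, e.g. via the GCC builtin
 * __builtin_clz(), so a fast find-first-bit along the lines of the
 * remark above could look like this hypothetical helper.
 */
#ifndef ASM
static inline unsigned int _ARM_Find_first_bit_example( uint32_t value )
{
  /* Bit 0 is the most significant bit here; __builtin_clz() is
     undefined for zero, so map a zero input to 32. */
  return value != 0 ? (unsigned int) __builtin_clz( value ) : 32;
}
#endif
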
#define CPU_MAXIMUM_PROCESSORS 32

/** @} */

#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
#endif

#ifdef ARM_MULTILIB_VFP
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef ARM_MULTILIB_ARCH_V4
  #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 40
#endif

#ifdef RTEMS_SMP
  #if defined(ARM_MULTILIB_VFP)
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #elif defined(ARM_MULTILIB_HAS_THREAD_ID_REGISTER)
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #else
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 44
  #endif
#endif

#define ARM_EXCEPTION_FRAME_SIZE 80

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

#define ARM_VFP_CONTEXT_SIZE 264

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreCPU
 */
/**@{**/

typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
  uint32_t isr_dispatch_disable;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
} Context_Control;

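/*
 * Illustrative sketch, not part of the original header: the
 * ARM_CONTEXT_CONTROL_*_OFFSET constants above are consumed by assembly
 * code and must match the C layout of Context_Control.  A consistency
 * check along these lines could be placed in a C file, assuming
 * RTEMS_STATIC_ASSERT() from <rtems/score/basedefs.h> and offsetof()
 * from <stddef.h>.
 */
#ifdef ARM_MULTILIB_VFP
RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, register_d8 )
    == ARM_CONTEXT_CONTROL_D8_OFFSET,
  ARM_CONTEXT_CONTROL_D8_OFFSET_check
);
#endif
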
typedef struct {
  /* Not supported */
} Context_Control_fp;

static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

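/*
 * Illustrative sketch, not part of the original header: a typical use
 * of the barriers above is to order a payload store before the store
 * which publishes it to another observer.  The names are hypothetical.
 */
static inline void _ARM_Publish_example(
  volatile uint32_t *payload,
  volatile uint32_t *ready,
  uint32_t value
)
{
  *payload = value;
  _ARM_Data_memory_barrier(); /* payload is visible before the flag */
  *ready = 1;
}
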
#if defined(ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE)
uint32_t arm_interrupt_disable( void );
void arm_interrupt_enable( uint32_t level );
void arm_interrupt_flash( uint32_t level );
#else
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /*
   * Disable only normal interrupts (IRQ).
   *
   * In order to support fast interrupts (FIQ) so that they can do something
   * useful, we have to disable the operating system support for FIQs.
   * Having operating system support for them would require that FIQs are
   * disabled during critical sections of the operating system and
   * application.  At this level IRQs and FIQs would be equal.  It is true
   * that FIQs could interrupt the non-critical sections of IRQs, so here
   * they would have a small advantage.  Without operating system support,
   * the FIQs can execute at any time (of course not during the service of
   * another FIQ).  If someone needs operating system support for a FIQ, she
   * can trigger a software interrupt and service the request in a two-step
   * process.
   */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#endif

  return level;
}

static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}

static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
#endif  /* !ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE */

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

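/*
 * Illustrative sketch, not part of the original header: the canonical
 * use of the macros above is a critical section which periodically
 * opens a window for pending interrupts.  The helper name is
 * hypothetical.
 */
static inline void _ARM_Critical_section_example( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );
  /* ... first part of the critical section ... */
  _CPU_ISR_Flash( level );  /* momentarily restore the previous level */
  /* ... second part of the critical section ... */
  _CPU_ISR_Enable( level );
}
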
RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  return ( level & 0x80 ) == 0;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  return level == 0;
#endif
}

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

#define _CPU_Fatal_halt( _source, _err )    \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     (void) _level;                         \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0);

/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 * @brief CPU switch context.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_NO_RETURN;

#if defined(ARM_MULTILIB_ARCH_V7M)
  void _ARMV7M_Start_multitasking( Context_Control *heir )
    RTEMS_NO_RETURN;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif

void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
#endif

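#ifdef RTEMS_SMP
  /*
   * Illustrative sketch, not part of the original header: a processor
   * polling a flag can sleep via _CPU_SMP_Processor_event_receive() and
   * is woken by a _CPU_SMP_Processor_event_broadcast() issued by the
   * flag writer.  The helper name is hypothetical.
   */
  static inline void _ARM_Wait_until_set_example( volatile uint32_t *flag )
  {
    while ( *flag == 0 ) {
      _CPU_SMP_Processor_event_receive();
    }
  }
#endif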
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  /* Classic four-instruction ARM byte reverse for cores without REV */
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}

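/*
 * Illustrative sketch, not part of the original header: on a
 * little-endian configuration the swap routines above convert
 * big-endian (network order) values to host order.  The helper name is
 * hypothetical.
 */
static inline uint32_t _ARM_Be32_to_host_example( uint32_t big_endian_value )
{
  return CPU_swap_u32( big_endian_value );
}
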
typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}

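/*
 * Illustrative sketch, not part of the original header: measuring a
 * short interval with the free-running counter.  The unsigned
 * subtraction in _CPU_Counter_difference() remains correct across a
 * counter wrap.  The helper name is hypothetical.
 */
static inline CPU_Counter_ticks _ARM_Measure_ticks_example( void )
{
  CPU_Counter_ticks begin = _CPU_Counter_read();

  /* ... code under measurement ... */

  return _CPU_Counter_difference( _CPU_Counter_read(), begin );
}
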
#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif

/** @} */

/**
 * @addtogroup ScoreCPUARM
 */
/**@{**/

#if defined(ARM_MULTILIB_ARCH_V4)

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;

typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  const ARM_VFP_context *vfp_context;
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

void _ARM_Exception_default( CPU_Exception_frame *frame );

/** @} */

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */