source: rtems/cpukit/score/cpu/arm/include/rtems/score/cpu.h @ 27bbc05

Last change on this file since 27bbc05 was 27bbc05, checked in by Sebastian Huber <sebastian.huber@…>, on Aug 2, 2018 at 12:49:01 PM

score: Remove CPU_PARTITION_ALIGNMENT

Use the CPU_SIZEOF_POINTER alignment instead. The internal alignment
requirement is defined by the use of Chain_Node (consisting of two
pointers) to manage the free chain of partitions.

It seems that previously the condition

CPU_PARTITION_ALIGNMENT >= sizeof(Chain_Node)

was true on all CPU ports. Now, we need an additional check.

Update #3482.

  • Property mode set to 100644
File size: 16.3 KB
Line 
1/**
2 * @file
3 *
4 * @brief ARM Architecture Support API
5 */
6
7/*
8 *  This include file contains information pertaining to the ARM
9 *  processor.
10 *
11 *  Copyright (c) 2009, 2017 embedded brains GmbH
12 *
13 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
14 *
15 *  Copyright (c) 2006 OAR Corporation
16 *
17 *  Copyright (c) 2002 Advent Networks, Inc.
18 *        Jay Monkman <jmonkman@adventnetworks.com>
19 *
20 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
21 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
22 *
23 *  The license and distribution terms for this file may be
24 *  found in the file LICENSE in this distribution or at
25 *  http://www.rtems.org/license/LICENSE.
26 *
27 */
28
29#ifndef _RTEMS_SCORE_CPU_H
30#define _RTEMS_SCORE_CPU_H
31
32#include <rtems/score/basedefs.h>
33#if defined(RTEMS_PARAVIRT)
34#include <rtems/score/paravirt.h>
35#endif
36#include <rtems/score/arm.h>
37
38#if defined(ARM_MULTILIB_ARCH_V4)
39
40/**
41 * @defgroup ScoreCPUARM ARM Specific Support
42 *
43 * @ingroup ScoreCPU
44 *
45 * @brief ARM specific support.
46 */
47/**@{**/
48
/*
 * Thumb-1 has no instructions to access the CPSR, so inline assembly that
 * must touch it temporarily switches to ARM state and back.  These macros
 * expand to the required mode-switch sequences (and the scratch register
 * they need) on Thumb-1, and to nothing on ARM or Thumb-2.
 */
#if defined(__thumb__) && !defined(__thumb2__)
  /* Scratch register used for the switch back to Thumb state */
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  /* "bx pc" enters ARM state; ".align 2" provides the required alignment */
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  /* Bit 0 of the branch target is set so "bx" returns to Thumb state */
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  /* ARM or Thumb-2: no state switch is necessary */
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
62
63/**
64 * @name Program Status Register
65 */
66/**@{**/
67
/*
 * CPSR/SPSR bit definitions, see the ARM Architecture Reference Manual,
 * "Program Status Registers".  Unsigned literals are used so that the
 * single-bit masks are well defined in C: a plain (1 << 31) left-shifts
 * into the sign bit of a signed int, which is undefined behavior.
 */
#define ARM_PSR_N (1U << 31)    /* Negative condition flag */
#define ARM_PSR_Z (1U << 30)    /* Zero condition flag */
#define ARM_PSR_C (1U << 29)    /* Carry condition flag */
#define ARM_PSR_V (1U << 28)    /* Overflow condition flag */
#define ARM_PSR_Q (1U << 27)    /* Cumulative saturation flag */
#define ARM_PSR_J (1U << 24)    /* Jazelle execution state bit */
#define ARM_PSR_GE_SHIFT 16     /* SIMD greater-than-or-equal flags */
#define ARM_PSR_GE_MASK (0xfU << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1U << 9)     /* Endianness execution state bit */
#define ARM_PSR_A (1U << 8)     /* Asynchronous abort mask */
#define ARM_PSR_I (1U << 7)     /* IRQ mask */
#define ARM_PSR_F (1U << 6)     /* FIQ mask */
#define ARM_PSR_T (1U << 5)     /* Thumb execution state bit */
#define ARM_PSR_M_SHIFT 0       /* Processor mode field */
#define ARM_PSR_M_MASK (0x1fU << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10      /* User mode */
#define ARM_PSR_M_FIQ 0x11      /* FIQ mode */
#define ARM_PSR_M_IRQ 0x12      /* IRQ mode */
#define ARM_PSR_M_SVC 0x13      /* Supervisor mode */
#define ARM_PSR_M_ABT 0x17      /* Abort mode */
#define ARM_PSR_M_HYP 0x1a      /* Hypervisor mode */
#define ARM_PSR_M_UND 0x1b      /* Undefined mode */
#define ARM_PSR_M_SYS 0x1f      /* System mode */
91
92/** @} */
93
94/** @} */
95
96#endif /* defined(ARM_MULTILIB_ARCH_V4) */
97
98/**
99 * @addtogroup ScoreCPU
100 */
101/**@{**/
102
103/*
104 *  The ARM uses the PIC interrupt model.
105 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/* This port has no legacy floating-point context support */
#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE

/* Provide an idle thread body only if the target has Wait For Interrupt */
#if defined(ARM_MULTILIB_HAS_WFI)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

/* Stacks grow from high to low addresses on this port */
#define CPU_STACK_GROWS_UP FALSE

/* Maximum cache line size in bytes of the supported configurations */
#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* Minimum task stack size: 4 KiB */
#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/* Interrupt stacks are aligned to whole cache lines */
#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there's a CLZ function which could be
 *     used to implement much quicker than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_MAXIMUM_PROCESSORS 32
175
176/** @} */
177
/*
 * Byte offsets of Context_Control fields for use by assembly language
 * code.  They must agree with the Context_Control layout below.
 */
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  /* Offset of Context_Control::thread_id (after 11 * 4 bytes of core registers) */
  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
#endif

#ifdef ARM_MULTILIB_VFP
  /* Offset of Context_Control::register_d8 */
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef ARM_MULTILIB_ARCH_V4
  /* Offset of Context_Control::isr_dispatch_disable */
  #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 40
#endif

#ifdef RTEMS_SMP
  /* Offset of Context_Control::is_executing for each configuration */
  #if defined(ARM_MULTILIB_VFP)
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #elif defined(ARM_MULTILIB_HAS_THREAD_ID_REGISTER)
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #else
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 44
  #endif
#endif

/* Size of CPU_Exception_frame in bytes */
#define ARM_EXCEPTION_FRAME_SIZE 80

/* Offset of CPU_Exception_frame::register_sp */
#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

/* Offset of CPU_Exception_frame::vfp_context */
#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

/* Size of ARM_VFP_context in bytes (2 * 4 + 32 * 8) */
#define ARM_VFP_CONTEXT_SIZE 264
207
208#ifndef ASM
209
210#ifdef __cplusplus
211extern "C" {
212#endif
213
214/**
215 * @addtogroup ScoreCPU
216 */
217/**@{**/
218
/**
 * @brief Thread register context saved across a context switch.
 *
 * The layout is mirrored by the ARM_CONTEXT_CONTROL_*_OFFSET defines and
 * by assembly language code; do not reorder fields without updating both.
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  /* Callee-saved core registers (AAPCS), plus fp, sp and lr */
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
  /* See ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE */
  uint32_t isr_dispatch_disable;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  /* Thread ID register value; see ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET */
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP
  /* Callee-saved VFP registers d8..d15 */
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  /* True while a processor executes this context; see the accessors below */
  volatile bool is_executing;
#endif
} Context_Control;
264
/**
 * @brief Legacy floating-point context.
 *
 * Not supported on this port (CPU_HARDWARE_FP is FALSE); VFP state is
 * kept in Context_Control and ARM_VFP_context instead.
 */
typedef struct {
  /* Not supported */
} Context_Control_fp;
268
/**
 * @brief Issues a data memory barrier (DMB).
 *
 * Falls back to a compiler-only memory barrier on targets without
 * barrier instructions.
 */
static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
277
/**
 * @brief Issues a data synchronization barrier (DSB).
 *
 * Falls back to a compiler-only memory barrier on targets without
 * barrier instructions.
 */
static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
286
/**
 * @brief Issues an instruction synchronization barrier (ISB).
 *
 * Falls back to a compiler-only memory barrier on targets without
 * barrier instructions.
 */
static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
295
296#if defined(ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE)
297uint32_t arm_interrupt_disable( void );
298void arm_interrupt_enable( uint32_t level );
299void arm_interrupt_flash( uint32_t level );
300#else
/**
 * @brief Disables normal interrupts (IRQ) and returns the previous level.
 *
 * ARMv4: sets the I bit (0x80) in the CPSR.  ARMv7-M: raises the priority
 * mask via a BASEPRI_MAX write of 0x80.  The return value is the level to
 * pass to arm_interrupt_enable().
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /*
   * Disable only normal interrupts (IRQ).
   *
   * In order to support fast interrupts (FIQ) such that they can do something
   * useful, we have to disable the operating system support for FIQs.  Having
   * operating system support for them would require that FIQs are disabled
   * during critical sections of the operating system and application.  At this
   * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
   * the non critical sections of IRQs, so here they would have a small
   * advantage.  Without operating system support, the FIQs can execute at any
   * time (of course not during the service of another FIQ). If someone needs
   * operating system support for a FIQ, she can trigger a software interrupt and
   * service the request in a two-step process.
   */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  /* BASEPRI_MAX only raises the priority mask, it never lowers it */
  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#endif

  return level;
}
343
/**
 * @brief Restores the interrupt level previously returned by
 *   arm_interrupt_disable().
 *
 * ARMv4: writes the CPSR.  ARMv7-M: writes BASEPRI.
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
364
/**
 * @brief Momentarily sets the given interrupt level, then restores the
 *   level that was active on entry.
 *
 * Used inside long critical sections to open a brief window in which
 * pending interrupts can be serviced.
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /* Save the current CPSR, set the requested level, then restore */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  /* Save the current BASEPRI, set the requested level, then restore */
  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
391#endif  /* !ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE */
392
/*
 * RTEMS interrupt disable/enable/flash primitives, mapped onto the
 * arm_interrupt_*() helpers above.  The cookie stores the previous level.
 */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

/* Restore the interrupt level saved by _CPU_ISR_Disable() */
#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

/* Briefly enable interrupts inside a disabled section */
#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )
403
404RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
405{
406#if defined(ARM_MULTILIB_ARCH_V4)
407  return ( level & 0x80 ) == 0;
408#elif defined(ARM_MULTILIB_ARCH_V7M)
409  return level == 0;
410#endif
411}
412
/**
 * @brief Sets the interrupt level of the executing thread.
 */
void _CPU_ISR_Set_level( uint32_t level );

/**
 * @brief Returns the interrupt level of the executing thread.
 */
uint32_t _CPU_ISR_Get_level( void );

/**
 * @brief Initializes a thread context for its first dispatch.
 *
 * @param the_context context to initialize
 * @param stack_area_begin begin of the stack area
 * @param stack_area_size size of the stack area in bytes
 * @param new_level initial interrupt level of the thread
 * @param entry_point thread entry point
 * @param is_fp floating-point thread indicator; NOTE(review): likely
 *   ignored since this port has no legacy FP support — confirm in the
 *   implementation
 * @param tls_area thread-local storage area
 */
void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

/* Fetch the stack pointer saved in a thread context */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp
429
#ifdef RTEMS_SMP
  /**
   * @brief Returns the is-executing indicator of the thread context.
   */
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  /**
   * @brief Sets the is-executing indicator of the thread context.
   */
  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif
446
/* Restart the calling thread with a freshly initialized context */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )

/* Initialize a legacy floating-point context area (not supported here) */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

/*
 * Halt the system: disable interrupts, place the error code in r0 so a
 * debugger can inspect it, and spin forever.  Note: no trailing semicolon
 * after "while (0)" so the macro behaves as a single statement at the
 * call site (a stray ";" breaks if/else bodies without braces).
 */
#define _CPU_Fatal_halt( _source, _err )    \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     (void) _level;                         \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0)
467
/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

/**
 * @brief Installs an interrupt vector handler and returns the previous
 *   handler through @a old_handler.
 */
void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 * @brief CPU switch context.
 *
 * Saves the context of the running thread and restores the context of
 * the heir thread.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

/**
 * @brief Restores the given context; does not return.
 */
void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_NO_RETURN;

#if defined(ARM_MULTILIB_ARCH_V7M)
  /* ARMv7-M starts multitasking through a dedicated routine */
  void _ARMV7M_Start_multitasking( Context_Control *heir )
    RTEMS_NO_RETURN;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif
492
#ifdef RTEMS_SMP
  /* CPU-specific SMP initialization; returns the processor count */
  uint32_t _CPU_SMP_Initialize( void );

  /* Starts the processor with the given index; true on success */
  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  /* Completes SMP initialization once all processors are accounted for */
  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  /* Per-processor hook invoked before multitasking starts */
  void _CPU_SMP_Prepare_start_multitasking( void );

  /**
   * @brief Returns the index of the executing processor.
   *
   * Reads the low eight affinity bits of the MPIDR.
   */
  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  /* Sends an inter-processor interrupt to the target processor */
  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  /* Issues a Send Event (SEV) instruction */
  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  /* Issues a Wait For Event (WFE) instruction */
  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  /*
   * Wakes processors waiting in _CPU_SMP_Processor_event_receive().  The
   * DSB ensures prior memory writes complete before the event is sent.
   */
  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }

  /*
   * Waits for a processor event; the DMB orders subsequent memory
   * accesses after the wake-up.
   */
  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
#endif
539
540
/**
 * @brief Returns the 32-bit value with its byte order reversed.
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 provides a single byte-reverse instruction */
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  /* Thumb-1 cannot encode the ARM sequence below; swap in plain C */
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  /* Classic ARM byte-swap sequence using EOR/BIC/ROR */
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
571
/**
 * @brief Returns the 16-bit value with its two bytes exchanged.
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 provides a halfword byte-reverse instruction */
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  uint16_t low_byte = value & 0xffU;
  uint16_t high_byte = ( value >> 8 ) & 0xffU;

  return (uint16_t) ( ( low_byte << 8 ) | high_byte );
#endif
}
585
/** Unsigned type of a CPU counter value; wraps around modulo 2^32. */
typedef uint32_t CPU_Counter_ticks;

/**
 * @brief Returns the frequency of the CPU counter in Hz.
 */
uint32_t _CPU_Counter_frequency( void );

/**
 * @brief Reads the current CPU counter value.
 */
CPU_Counter_ticks _CPU_Counter_read( void );

/**
 * @brief Returns the number of ticks elapsed from @a first to @a second.
 *
 * Modular unsigned arithmetic yields the correct difference even if the
 * counter wrapped around between the two reads.
 */
static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  CPU_Counter_ticks delta = second - first;

  return delta;
}
599
600#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
601  void *_CPU_Thread_Idle_body( uintptr_t ignored );
602#endif
603
604/** @} */
605
606/**
607 * @addtogroup ScoreCPUARM
608 */
609/**@{**/
610
611#if defined(ARM_MULTILIB_ARCH_V4)
612
/**
 * @brief Symbolic names of the ARMv4 exception vectors.
 */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,       /* Undefined instruction */
  ARM_EXCEPTION_SWI = 2,         /* Software interrupt (SVC) */
  ARM_EXCEPTION_PREF_ABORT = 3,  /* Prefetch abort */
  ARM_EXCEPTION_DATA_ABORT = 4,  /* Data abort */
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  /* Forces a 32-bit representation of the enum */
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;
625
626#endif /* defined(ARM_MULTILIB_ARCH_V4) */
627
/**
 * @brief Complete VFP-D32 register state: FPEXC, FPSCR and d0..d31.
 *
 * The size of this structure must equal ARM_VFP_CONTEXT_SIZE
 * (2 * 4 + 32 * 8 == 264 bytes).
 */
typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;
664
/**
 * @brief Registers saved on exception entry.
 *
 * The layout must agree with ARM_EXCEPTION_FRAME_SIZE and the
 * ARM_EXCEPTION_FRAME_*_OFFSET defines above.
 */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  /* VFP state of the interrupted context — NOTE(review): presumably NULL
     when no VFP state was saved; confirm against the exception handlers */
  const ARM_VFP_context *vfp_context;
  /* Pads the frame to a multiple of 8 bytes (CPU_STACK_ALIGNMENT) */
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;
692
693void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
694
695void _ARM_Exception_default( CPU_Exception_frame *frame );
696
697/** @} */
698
699/** Type that can store a 32-bit integer or a pointer. */
700typedef uintptr_t CPU_Uint32ptr;
701
702#ifdef __cplusplus
703}
704#endif
705
706#endif /* ASM */
707
708#endif /* _RTEMS_SCORE_CPU_H */
Note: See TracBrowser for help on using the repository browser.