source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ a6c5a7e0

4.115
Last change on this file since a6c5a7e0 was a6c5a7e0, checked in by Daniel Krueger <daniel.krueger@…>, on 03/17/15 at 14:02:59

arm: Align ARM exception frame to 8 bytes

The stack pointer must be aligned on an 8 byte boundary on ARM, so the size
of the exception frame must be a multiple of 8 bytes. Otherwise we may get
an alignment fault, for example when executing code in the data abort
handler.

Close #2318.

Signed-off-by: Daniel Krueger <daniel.krueger@…>

  • Property mode set to 100644
File size: 16.3 KB
RevLine 
[da215ded]1/**
[78623bce]2 * @file
3 *
[66fffc7]4 * @brief ARM Architecture Support API
[da215ded]5 */
6
[4f0b287]7/*
8 *  This include file contains information pertaining to the ARM
[08330bf]9 *  processor.
10 *
[8ae37323]11 *  Copyright (c) 2009-2014 embedded brains GmbH.
[39c8fdb]12 *
[a3ff693]13 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
14 *
[6a07436]15 *  Copyright (c) 2006 OAR Corporation
16 *
[fa237002]17 *  Copyright (c) 2002 Advent Networks, Inc.
[4f0b287]18 *        Jay Monkman <jmonkman@adventnetworks.com>
19 *
[08330bf]20 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
21 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
22 *
23 *  The license and distribution terms for this file may be
24 *  found in the file LICENSE in this distribution or at
[c499856]25 *  http://www.rtems.org/license/LICENSE.
[08330bf]26 *
27 */
28
[7f70d1b7]29#ifndef _RTEMS_SCORE_CPU_H
30#define _RTEMS_SCORE_CPU_H
[08330bf]31
[89b85e51]32#include <rtems/score/types.h>
[632e4306]33#include <rtems/score/arm.h>
[08330bf]34
[c5ed148]35#if defined(ARM_MULTILIB_ARCH_V4)
36
[78623bce]37/**
38 * @defgroup ScoreCPUARM ARM Specific Support
39 *
40 * @ingroup ScoreCPU
41 *
42 * @brief ARM specific support.
43 */
[b697bc6]44/**@{**/
[78623bce]45
[2bbea4dd]46#if defined(__thumb__) && !defined(__thumb2__)
[39c8fdb]47  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
48  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
49  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
50  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
51  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
[248e29a]52#else
[39c8fdb]53  #define ARM_SWITCH_REGISTERS
54  #define ARM_SWITCH_TO_ARM
55  #define ARM_SWITCH_BACK
56  #define ARM_SWITCH_OUTPUT
57  #define ARM_SWITCH_ADDITIONAL_OUTPUT
[248e29a]58#endif
[08330bf]59
[78623bce]60/**
61 * @name Program Status Register
62 */
[b697bc6]63/**@{**/
[78623bce]64
[39c8fdb]65#define ARM_PSR_N (1 << 31)
66#define ARM_PSR_Z (1 << 30)
67#define ARM_PSR_C (1 << 29)
68#define ARM_PSR_V (1 << 28)
69#define ARM_PSR_Q (1 << 27)
70#define ARM_PSR_J (1 << 24)
71#define ARM_PSR_GE_SHIFT 16
72#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
73#define ARM_PSR_E (1 << 9)
74#define ARM_PSR_A (1 << 8)
75#define ARM_PSR_I (1 << 7)
76#define ARM_PSR_F (1 << 6)
77#define ARM_PSR_T (1 << 5)
78#define ARM_PSR_M_SHIFT 0
79#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
80#define ARM_PSR_M_USR 0x10
81#define ARM_PSR_M_FIQ 0x11
82#define ARM_PSR_M_IRQ 0x12
83#define ARM_PSR_M_SVC 0x13
84#define ARM_PSR_M_ABT 0x17
85#define ARM_PSR_M_UND 0x1b
86#define ARM_PSR_M_SYS 0x1f
87
[78623bce]88/** @} */
89
90/** @} */
91
[c5ed148]92#endif /* defined(ARM_MULTILIB_ARCH_V4) */
93
[78623bce]94/**
95 * @addtogroup ScoreCPU
96 */
[b697bc6]97/**@{**/
[78623bce]98
[632e4306]99/* If someone uses THUMB we assume she wants minimal code size */
100#ifdef __thumb__
101  #define CPU_INLINE_ENABLE_DISPATCH FALSE
102#else
103  #define CPU_INLINE_ENABLE_DISPATCH TRUE
104#endif
[08330bf]105
[632e4306]106#if defined(__ARMEL__)
107  #define CPU_BIG_ENDIAN FALSE
108  #define CPU_LITTLE_ENDIAN TRUE
109#elif defined(__ARMEB__)
110  #define CPU_BIG_ENDIAN TRUE
111  #define CPU_LITTLE_ENDIAN FALSE
112#else
113  #error "unknown endianness"
114#endif
[08330bf]115
[dea10503]116/*
117 *  The ARM uses the PIC interrupt model.
118 */
119#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE
120
[08330bf]121#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
122
[c5ed148]123#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
[08330bf]124
[fa237002]125#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
[08330bf]126
127#define CPU_ISR_PASSES_FRAME_POINTER 0
128
[cfd8d7a]129#define CPU_HARDWARE_FP FALSE
[08330bf]130
[632e4306]131#define CPU_SOFTWARE_FP FALSE
[08330bf]132
[632e4306]133#define CPU_ALL_TASKS_ARE_FP FALSE
[08330bf]134
[632e4306]135#define CPU_IDLE_TASK_IS_FP FALSE
[08330bf]136
[632e4306]137#define CPU_USE_DEFERRED_FP_SWITCH FALSE
[08330bf]138
[d9bd5cd6]139#if defined(ARM_MULTILIB_HAS_WFI)
[f4539aa]140  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
141#else
142  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
143#endif
[08330bf]144
[632e4306]145#define CPU_STACK_GROWS_UP FALSE
[08330bf]146
[632e4306]147/* XXX Why 32? */
148#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))
[08330bf]149
[46689a1e]150#define CPU_TIMESTAMP_USE_STRUCT_TIMESPEC TRUE
[d0630763]151
[08330bf]152/*
[632e4306]153 * The interrupt mask disables only normal interrupts (IRQ).
[08330bf]154 *
[632e4306]155 * In order to support fast interrupts (FIQ) such that they can do something
156 * useful, we have to disable the operating system support for FIQs.  Having
157 * operating system support for them would require that FIQs are disabled
158 * during critical sections of the operating system and application.  At this
159 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
160 * the non critical sections of IRQs, so here they would have a small
161 * advantage.  Without operating system support, the FIQs can execute at any
162 * time (of course not during the service of another FIQ). If someone needs
163 * operating system support for a FIQ, she can trigger a software interrupt and
164 * service the request in a two-step process.
[08330bf]165 */
[007bdc4]166#define CPU_MODES_INTERRUPT_MASK 0x1
[08330bf]167
168#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
169
170#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
171
[4db30283]172#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
173
[632e4306]174#define CPU_STACK_MINIMUM_SIZE (1024 * 4)
[08330bf]175
[f1738ed]176/* AAPCS, section 4.1, Fundamental Data Types */
177#define CPU_SIZEOF_POINTER 4
178
[71c8457]179/* AAPCS, section 4.1, Fundamental Data Types */
180#define CPU_ALIGNMENT 8
[08330bf]181
[632e4306]182#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
[08330bf]183
[86820eda]184/* AAPCS, section 4.3.1, Aggregates */
185#define CPU_PARTITION_ALIGNMENT 4
[08330bf]186
[71c8457]187/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
188#define CPU_STACK_ALIGNMENT 8
[08330bf]189
190/*
[632e4306]191 * Bitfield handler macros.
[08330bf]192 *
[632e4306]193 * If we had a particularly fast function for finding the first
194 * bit set in a word, it would go here. Since we don't (*), we'll
195 * just use the universal macros.
[08330bf]196 *
[632e4306]197 * (*) On ARM V5 and later, there's a CLZ function which could be
198 *     used to implement much quicker than the default macro.
[08330bf]199 */
200
[632e4306]201#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
[08330bf]202
[632e4306]203#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
[08330bf]204
[10fd4aac]205#define CPU_PER_CPU_CONTROL_SIZE 0
206
[78623bce]207/** @} */
208
[022851a]209#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
210  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
211#endif
212
[8ae37323]213#ifdef ARM_MULTILIB_VFP
[cfd8d7a]214  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
215#endif
216
[38b59a6]217#ifdef RTEMS_SMP
[8ae37323]218  #ifdef ARM_MULTILIB_VFP
[38b59a6]219    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
220  #else
221    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
222  #endif
223#endif
224
[a6c5a7e0]225#define ARM_EXCEPTION_FRAME_SIZE 80
[cfd8d7a]226
227#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52
228
229#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72
230
231#define ARM_VFP_CONTEXT_SIZE 264
232
[632e4306]233#ifndef ASM
[2d877aa]234
[632e4306]235#ifdef __cplusplus
236extern "C" {
237#endif
[2d877aa]238
[78623bce]239/**
240 * @addtogroup ScoreCPU
241 */
[b697bc6]242/**@{**/
[661e5de4]243
/**
 * @brief CPU port specific per-CPU state (empty for this port).
 */
typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;
247
/**
 * @brief Thread register context saved and restored by the context switch
 *   code.
 *
 * The field offsets must agree with the ARM_CONTEXT_CONTROL_*_OFFSET
 * defines above, which are used by the assembly context switch code
 * (e.g. thread_id at offset 44, register_d8 at offset 48).
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  /* Per-thread ID value (thread ID register support) */
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP
  /* VFP callee-saved double precision registers d8..d15 */
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  /* Accessed via _CPU_Context_Get/Set_is_executing() below */
  volatile bool is_executing;
#endif
} Context_Control;
[661e5de4]293
/**
 * @brief Floating point context.
 *
 * Unused on this port (CPU_HARDWARE_FP is FALSE); the VFP registers are
 * part of Context_Control instead.
 */
typedef struct {
  /* Not supported */
} Context_Control_fp;
[661e5de4]297
[78623bce]298extern uint32_t arm_cpu_mode;
299
/**
 * @brief Issues a Data Memory Barrier (DMB); no-op on targets without
 *   barrier instructions.
 */
static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#endif
}
306
/**
 * @brief Issues a Data Synchronization Barrier (DSB); no-op on targets
 *   without barrier instructions.
 */
static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#endif
}
313
314static inline void _ARM_Instruction_synchronization_barrier( void )
315{
[7c90670]316#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
[1a246d7e]317  __asm__ volatile ( "isb" : : : "memory" );
[7c90670]318#endif
[1a246d7e]319}
320
/**
 * @brief Disables normal interrupts (IRQ).
 *
 * @return The previous interrupt status; pass it to arm_interrupt_enable()
 *   to restore the former state.
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /* Set the I bit (0x80) in the CPSR to mask IRQs; FIQs are not touched */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  /* basepri_max only raises the priority mask, never lowers it */
  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  /* No interrupt masking support on this architecture variant */
  level = 0;
#endif

  return level;
}
[08330bf]351
/**
 * @brief Restores the interrupt status previously obtained from
 *   arm_interrupt_disable().
 *
 * @param[in] level The saved interrupt status (CPSR on ARMv4, BASEPRI on
 *   ARMv7-M).
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
[08330bf]372
/**
 * @brief Briefly applies the given interrupt status and then restores the
 *   current one (used to open an interrupt window inside a critical
 *   section).
 *
 * @param[in] level The interrupt status to apply temporarily.
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /* Save current CPSR, write the supplied level, then write back the save */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  /* Same sequence via BASEPRI on ARMv7-M */
  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
[4f0b287]399
/* Disable IRQs and store the previous interrupt status in _isr_cookie */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

/* Restore the interrupt status saved by _CPU_ISR_Disable() */
#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

/* Briefly apply the saved status, then re-establish the current one */
#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );
[08330bf]414
[4f0b287]415void _CPU_Context_Initialize(
[632e4306]416  Context_Control *the_context,
[c5ed148]417  void *stack_area_begin,
418  size_t stack_area_size,
[632e4306]419  uint32_t new_level,
[c5ed148]420  void (*entry_point)( void ),
[022851a]421  bool is_fp,
422  void *tls_area
[4f0b287]423);
[08330bf]424
[632e4306]425#define _CPU_Context_Get_SP( _context ) \
426  (_context)->register_sp
[08330bf]427
#ifdef RTEMS_SMP
  /**
   * @brief Reads the is_executing indicator of the context.
   */
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  /**
   * @brief Writes the is_executing indicator of the context.
   */
  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif
444
[08330bf]445#define _CPU_Context_Restart_self( _the_context ) \
446   _CPU_Context_restore( (_the_context) );
447
448#define _CPU_Context_Fp_start( _base, _offset ) \
449   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
450
451#define _CPU_Context_Initialize_fp( _destination ) \
[632e4306]452  do { \
453    *(*(_destination)) = _CPU_Null_fp_context; \
454  } while (0)
[08330bf]455
/*
 * Halt the system: disable interrupts, place the error code in r0 so it can
 * be inspected with a debugger, and spin forever.
 *
 * Fix: the macro previously ended with "} while (0);" — the trailing
 * semicolon defeats the do/while(0) idiom, e.g.
 * "if (c) _CPU_Fatal_halt(s, e); else ..." would not compile
 * (CERT C PRE11-C).
 */
#define _CPU_Fatal_halt( _source, _err )    \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     (void) _level;                         \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0)
[08330bf]468
[43e0599]469/**
[66fffc7]470 * @brief CPU initialization.
[43e0599]471 */
[632e4306]472void _CPU_Initialize( void );
[08330bf]473
474void _CPU_ISR_install_vector(
[632e4306]475  uint32_t vector,
476  proc_ptr new_handler,
477  proc_ptr *old_handler
[08330bf]478);
479
[43e0599]480/**
[66fffc7]481 * @brief CPU switch context.
[43e0599]482 */
[632e4306]483void _CPU_Context_switch( Context_Control *run, Context_Control *heir );
[08330bf]484
[db0df7b]485void _CPU_Context_restore( Context_Control *new_context )
[c5ed148]486  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
487
488#if defined(ARM_MULTILIB_ARCH_V7M)
[bf19dbc3]489  void _ARMV7M_Start_multitasking( Context_Control *heir )
490    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
[c5ed148]491  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
492#endif
[08330bf]493
[39993d6]494void _CPU_Context_volatile_clobber( uintptr_t pattern );
495
496void _CPU_Context_validate( uintptr_t pattern );
497
[f2f211c5]498#ifdef RTEMS_SMP
[53e008b]499  uint32_t _CPU_SMP_Initialize( void );
500
501  bool _CPU_SMP_Start_processor( uint32_t cpu_index );
502
503  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );
[4627fcd]504
[c34f94f7]505  void _CPU_SMP_Prepare_start_multitasking( void );
506
  /**
   * @brief Returns the index of the current processor.
   *
   * The low byte of the MPIDR (affinity level 0) is used as the processor
   * index.
   */
  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }
519
[ca63ae2]520  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
521
  /** @brief Executes a Send Event (SEV) instruction. */
  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }
526
  /** @brief Executes a Wait For Event (WFE) instruction. */
  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }
531
  /**
   * @brief Signals other processors: a DSB first makes prior memory
   *   writes visible, then SEV wakes processors waiting in WFE.
   */
  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }
537
  /**
   * @brief Waits for a processor event: WFE first, then a DMB so that
   *   subsequent reads observe the sender's writes.
   */
  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
543#endif
544
545
/**
 * @brief Byte-swaps a 32-bit value (endianness conversion).
 *
 * @param[in] value The value to swap.
 * @return The value with reversed byte order.
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 provides REV for a single-instruction byte reversal */
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  /* Plain C fallback for Thumb-1, which lacks the shifted-operand forms
     used in the ARM-mode sequence below */
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  /* Classic ARM-mode byte-swap idiom using EOR/BIC/ROR */
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
576
/**
 * @brief Byte-swaps a 16-bit value (endianness conversion).
 *
 * @param[in] value The value to swap.
 * @return The value with reversed byte order.
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 REV16 reverses the bytes within each halfword */
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  /* Portable C fallback */
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}
[b32fe793]590
[24bf11e]591typedef uint32_t CPU_Counter_ticks;
592
593CPU_Counter_ticks _CPU_Counter_read( void );
594
595CPU_Counter_ticks _CPU_Counter_difference(
596  CPU_Counter_ticks second,
597  CPU_Counter_ticks first
598);
599
[f4539aa]600#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
601  void *_CPU_Thread_Idle_body( uintptr_t ignored );
602#endif
603
[78623bce]604/** @} */
[b32fe793]605
[78623bce]606/**
607 * @addtogroup ScoreCPUARM
608 */
[b697bc6]609/**@{**/
[632e4306]610
[0d8cde9]611#if defined(ARM_MULTILIB_ARCH_V4)
612
/**
 * @brief Symbolic names for the classic ARM exception vectors.
 */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  /* Force a 32-bit representation of the enum type */
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;
625
[0d8cde9]626#endif /* defined(ARM_MULTILIB_ARCH_V4) */
[78623bce]627
/**
 * @brief VFP register context captured for exceptions.
 *
 * The size must equal ARM_VFP_CONTEXT_SIZE (2 * 4 + 32 * 8 == 264 bytes).
 */
typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;
664
/**
 * @brief Register frame saved when an exception is taken.
 *
 * The layout must agree with ARM_EXCEPTION_FRAME_SIZE (20 * 4 == 80 bytes)
 * and the ARM_EXCEPTION_FRAME_*_OFFSET defines above, which are used by
 * the assembly exception entry code.
 */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  /* Pointer to a saved VFP context; presumably NULL when no VFP state was
     saved — TODO confirm against the exception entry code */
  const ARM_VFP_context *vfp_context;
  /* Pads the frame to a multiple of 8 bytes so the stack pointer stays
     8-byte aligned (see ticket #2318 in the commit message above) */
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;
692
693typedef CPU_Exception_frame CPU_Interrupt_frame;
694
[815994f]695void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
696
[50f3c42b]697void _ARM_Exception_default( CPU_Exception_frame *frame );
698
[037b57a]699/*
700 * FIXME: In case your BSP uses this function, then convert it to use
701 * the shared start.S file for ARM.
702 */
703void rtems_exception_init_mngt( void );
704
[0d8cde9]705/** @} */
706
[08330bf]707#ifdef __cplusplus
708}
709#endif
710
[632e4306]711#endif /* ASM */
712
713#endif /* _RTEMS_SCORE_CPU_H */
Note: See TracBrowser for help on using the repository browser.