source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ 27bfcd8

Last change on this file since 27bfcd8 was 27bfcd8, checked in by Sebastian Huber <sebastian.huber@…>, on 01/25/17 at 13:32:02

score: Delete _CPU_Context_Fp_start()

Since the FP area pointer is passed by reference in
_CPU_Context_Initialize_fp(), the optional FP area adjustment via
_CPU_Context_Fp_start() is superfluous. It is also wrong with respect
to memory management, e.g. the pointer passed to _Workspace_Free() may
not be the one returned by _Workspace_Allocate().

Close #1400.

/**
 * @file
 *
 * @brief ARM Architecture Support API
 */

/*
 *  This include file contains information pertaining to the ARM
 *  processor.
 *
 *  Copyright (c) 2009, 2016 embedded brains GmbH
 *
 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 *  Copyright (c) 2006 OAR Corporation
 *
 *  Copyright (c) 2002 Advent Networks, Inc.
 *        Jay Monkman <jmonkman@adventnetworks.com>
 *
 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/arm.h>

#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @defgroup ScoreCPUARM ARM Specific Support
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM specific support.
 */
/**@{**/

#if defined(__thumb__) && !defined(__thumb2__)
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif

/**
 * @name Program Status Register
 */
/**@{**/

#define ARM_PSR_N (1 << 31)
#define ARM_PSR_Z (1 << 30)
#define ARM_PSR_C (1 << 29)
#define ARM_PSR_V (1 << 28)
#define ARM_PSR_Q (1 << 27)
#define ARM_PSR_J (1 << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1 << 9)
#define ARM_PSR_A (1 << 8)
#define ARM_PSR_I (1 << 7)
#define ARM_PSR_F (1 << 6)
#define ARM_PSR_T (1 << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_HYP 0x1a
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

/** @} */

/** @} */

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

/**
 * @addtogroup ScoreCPU
 */
/**@{**/

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#if defined(ARM_MULTILIB_ARCH_V7M)
  #define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE
#else
  #define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE
#endif

#if defined(ARM_MULTILIB_HAS_WFI)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

#define CPU_STACK_GROWS_UP FALSE

#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ). If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here.  Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, the CLZ instruction could be used to
 *     implement this much more quickly than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
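
/*
 * Illustrative sketch, not part of the upstream header: on ARMv5 and later a
 * CLZ based find-first-bit could look like the hypothetical helper below
 * (GCC's __builtin_clz compiles to a single CLZ instruction on these cores).
 * It is shown inside a comment because everything before the ASM guard below
 * has to remain usable from assembly language includes.
 *
 *   static inline unsigned int _ARM_Find_first_bit( uint32_t value )
 *   {
 *     return (unsigned int) __builtin_clz( value ); // undefined for value == 0
 *   }
 */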

#define CPU_MAXIMUM_PROCESSORS 32

/** @} */

#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
#endif

#ifdef ARM_MULTILIB_VFP
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef ARM_MULTILIB_ARCH_V4
  #if defined(ARM_MULTILIB_VFP)
    #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 112
  #elif defined(ARM_MULTILIB_HAS_THREAD_ID_REGISTER)
    #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 48
  #else
    #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 44
  #endif
#endif

#ifdef RTEMS_SMP
  #ifdef ARM_MULTILIB_VFP
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 116
  #else
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 52
  #endif
#endif

#define ARM_EXCEPTION_FRAME_SIZE 80

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

#define ARM_VFP_CONTEXT_SIZE 264

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreCPU
 */
/**@{**/

typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef ARM_MULTILIB_ARCH_V4
  uint32_t isr_dispatch_disable;
#endif
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
} Context_Control;

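/*
 * Illustrative note, not part of the upstream header: the
 * ARM_CONTEXT_CONTROL_*_OFFSET constants defined before the ASM guard mirror
 * the byte offsets of the corresponding Context_Control members above for use
 * by the assembly language context switch code.  A hypothetical compile time
 * check (with ARM_MULTILIB_VFP defined) could look like:
 *
 *   RTEMS_STATIC_ASSERT(
 *     offsetof( Context_Control, register_d8 )
 *       == ARM_CONTEXT_CONTROL_D8_OFFSET,
 *     ARM_CONTEXT_CONTROL_D8_OFFSET
 *   );
 */
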
typedef struct {
  /* Not supported */
} Context_Control_fp;

extern uint32_t arm_cpu_mode;

static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

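/*
 * Illustrative sketch, not part of the upstream header: a common use of the
 * barriers above is to publish data before raising a flag that another
 * processor or an interrupt handler polls.  The function name and parameters
 * are hypothetical.
 */
static inline void _ARM_Publish_example(
  volatile uint32_t *data,
  volatile uint32_t *ready
)
{
  *data = 0xdecafbadU;           /* produce the payload */
  _ARM_Data_memory_barrier();    /* order the payload store before the flag */
  *ready = 1U;                   /* signal that the payload is visible */
}
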
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /* Set the I bit (0x80) in the CPSR to mask IRQ; FIQ remains enabled */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  /* Mask interrupts by raising BASEPRI to the 0x80 threshold via BASEPRI_MAX */
  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#endif

  return level;
}

static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}

static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  return ( level & 0x80 ) == 0;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  return level == 0;
#endif
}

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/* The FP area pointer is passed by reference, hence the double dereference */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

#define _CPU_Fatal_halt( _source, _err )    \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     (void) _level;                         \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0);

/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 * @brief CPU switch context.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_NO_RETURN;

#if defined(ARM_MULTILIB_ARCH_V7M)
  void _ARMV7M_Start_multitasking( Context_Control *heir )
    RTEMS_NO_RETURN;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif

void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
#endif


static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}

typedef uint32_t CPU_Counter_ticks;

CPU_Counter_ticks _CPU_Counter_read( void );

CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
);

#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif

/** @} */

/**
 * @addtogroup ScoreCPUARM
 */
/**@{**/

#if defined(ARM_MULTILIB_ARCH_V4)

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;

typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  const ARM_VFP_context *vfp_context;
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;
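
/*
 * Illustrative note, not part of the upstream header: ARM_EXCEPTION_FRAME_SIZE
 * (80), ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET (52),
 * ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET (72) and ARM_VFP_CONTEXT_SIZE (264)
 * defined before the ASM guard mirror the layout of CPU_Exception_frame and
 * ARM_VFP_context above: r0..r12 occupy the first 52 bytes, the vfp_context
 * pointer follows the status register and vector words at offset 72, and the
 * trailing padding word keeps the frame size a multiple of the 8-byte stack
 * alignment.  A hypothetical compile time check could be:
 *
 *   RTEMS_STATIC_ASSERT(
 *     sizeof( CPU_Exception_frame ) == ARM_EXCEPTION_FRAME_SIZE,
 *     ARM_EXCEPTION_FRAME_SIZE
 *   );
 */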

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

void _ARM_Exception_default( CPU_Exception_frame *frame );

/*
 * FIXME: If your BSP uses this function, convert it to use the shared
 * start.S file for ARM.
 */
void rtems_exception_init_mngt( void );

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */