source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ f2f211c5

4.11
Last change on this file since f2f211c5 was f2f211c5, checked in by Sebastian Huber <sebastian.huber@…>, on May 31, 2013 at 11:59:34 AM

smp: Add ARM support

  • Property mode set to 100644
File size: 14.6 KB
Line 
1/**
2 * @file
3 *
4 * @brief ARM Architecture Support API
5 */
6
7/*
8 *  This include file contains information pertaining to the ARM
9 *  processor.
10 *
11 *  Copyright (c) 2009-2013 embedded brains GmbH.
12 *
13 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
14 *
15 *  Copyright (c) 2006 OAR Corporation
16 *
17 *  Copyright (c) 2002 Advent Networks, Inc.
18 *        Jay Monkman <jmonkman@adventnetworks.com>
19 *
20 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
21 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
22 *
23 *  The license and distribution terms for this file may be
24 *  found in the file LICENSE in this distribution or at
25 *  http://www.rtems.com/license/LICENSE.
26 *
27 */
28
29#ifndef _RTEMS_SCORE_CPU_H
30#define _RTEMS_SCORE_CPU_H
31
32#include <rtems/score/types.h>
33#include <rtems/score/arm.h>
34
35#if defined(ARM_MULTILIB_ARCH_V4)
36
37/**
38 * @defgroup ScoreCPUARM ARM Specific Support
39 *
40 * @ingroup ScoreCPU
41 *
42 * @brief ARM specific support.
43 */
44/**@{**/
45
/*
 * On Thumb-1 (e.g. ARMv4T) the MRS/MSR instructions used by the inline
 * assembly in this file are unavailable, so the assembly fragments must
 * temporarily switch the processor into ARM state and back:
 *
 *  - ARM_SWITCH_TO_ARM aligns to a 4-byte boundary and branches via
 *    "bx pc" into the following ARM code (".arm").
 *  - ARM_SWITCH_BACK builds a Thumb return address (bit 0 set selects
 *    Thumb state) from the PC and branches back via "bx", resuming
 *    Thumb assembly (".thumb").
 *
 * ARM_SWITCH_REGISTERS declares the scratch register consumed by
 * ARM_SWITCH_BACK; it must be listed in the asm output operands via
 * ARM_SWITCH_OUTPUT (sole output) or ARM_SWITCH_ADDITIONAL_OUTPUT
 * (appended to other outputs).
 */
#if defined(__thumb__) && !defined(__thumb2__)
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  /* ARM or Thumb-2 state: no state switch necessary, all macros are empty */
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
59
60/**
61 * @name Program Status Register
62 */
63/**@{**/
64
/*
 * Bits of the ARM program status registers (CPSR/SPSR), see the ARM
 * Architecture Reference Manual.  This block is outside of the
 * "#ifndef ASM" region, so the constants must stay valid in both C and
 * assembler expressions.
 */

/*
 * Condition code flags.  Bit 31 is written as a plain hexadecimal
 * constant: (1 << 31) would be undefined behaviour in C, since the
 * result of the shift is not representable in a signed int.
 */
#define ARM_PSR_N 0x80000000
#define ARM_PSR_Z (1 << 30)
#define ARM_PSR_C (1 << 29)
#define ARM_PSR_V (1 << 28)

/* Sticky saturation flag */
#define ARM_PSR_Q (1 << 27)

/* Jazelle state bit */
#define ARM_PSR_J (1 << 24)

/* SIMD greater-than-or-equal flags */
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)

/* Endianness (E), asynchronous abort (A), IRQ (I), FIQ (F) disable and
   Thumb state (T) bits */
#define ARM_PSR_E (1 << 9)
#define ARM_PSR_A (1 << 8)
#define ARM_PSR_I (1 << 7)
#define ARM_PSR_F (1 << 6)
#define ARM_PSR_T (1 << 5)

/* Processor mode field and its defined values */
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f
87
88/** @} */
89
90/** @} */
91
92#endif /* defined(ARM_MULTILIB_ARCH_V4) */
93
94/**
95 * @addtogroup ScoreCPU
96 */
97/**@{**/
98
/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

/* Byte order is derived from the compiler's target macros */
#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif

#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/* No dedicated software interrupt stack is managed by the port */
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

/* No floating point context management is configured in this port */
#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

/* A port-provided idle thread body exists only if WFI is available */
#if defined(ARM_MULTILIB_HAS_WFI)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32? */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ). If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x80

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8

#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* 4 KiB minimum task stack */
#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there's a CLZ function which could be
 *     used to implement much quicker than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

/** @} */

#ifdef ARM_MULTILIB_VFP_D32
  /* Offset of register_d8 in Context_Control: the eleven 32-bit fields of
     the ARMv4 variant occupy 44 bytes, padded to the 8-byte alignment
     required by uint64_t */
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

/* Size of CPU_Exception_frame: 17 32-bit registers (r0-r12, sp, lr, pc,
   PSR) plus the vector number and the VFP context pointer = 76 bytes */
#define ARM_EXCEPTION_FRAME_SIZE 76

/* register_sp follows r0-r12, i.e. 13 * 4 bytes into the frame */
#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

/* vfp_context is the last member of the frame */
#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

/* sizeof(ARM_VFP_context): 2 * 4 bytes (FPEXC, FPSCR) + 32 * 8 bytes
   (d0-d31) = 264 bytes */
#define ARM_VFP_CONTEXT_SIZE 264
224
225#ifndef ASM
226
227#ifdef __cplusplus
228extern "C" {
229#endif
230
231/**
232 * @addtogroup ScoreCPU
233 */
234/**@{**/
235
/*
 * Thread (task) register context: the registers preserved across a
 * function call for the selected architecture variant, saved and
 * restored by _CPU_Context_switch()/_CPU_Context_restore().
 *
 * NOTE(review): The field order is a binary layout contract —
 * ARM_CONTEXT_CONTROL_D8_OFFSET (48) above corresponds to register_d8 in
 * the ARMv4 variant (11 * 4 = 44 bytes rounded up to the 8-byte
 * alignment of uint64_t).  Do not reorder fields without updating the
 * offset macros and the assembly code that presumably relies on them.
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  /* Saved program status (mode, interrupt mask, condition codes) */
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  /* Interrupt nesting level of this context (ARMv7-M only) */
  uint32_t isr_nest_level;
#else
  /* Fallback for other multilib variants: stack pointer only */
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_VFP_D32
  /* Callee-saved VFP registers d8-d15 */
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
} Context_Control;
275
/*
 * Floating point context: intentionally empty since this port configures
 * no floating point support (CPU_HARDWARE_FP/CPU_SOFTWARE_FP are FALSE).
 */
typedef struct {
  /* Not supported */
} Context_Control_fp;

/* Processor mode for task execution (an ARM_PSR_M_* value) —
   NOTE(review): presumably set by BSP/startup code; confirm at the port
   sources */
extern uint32_t arm_cpu_mode;
281
/*
 * Disables interrupts and returns the previous interrupt state.
 *
 * ARMv4: returns the current CPSR and sets the I bit (0x80, see
 * ARM_PSR_I) so that IRQs are masked; FIQs stay enabled (see the
 * CPU_MODES_INTERRUPT_MASK discussion above).
 *
 * ARMv7-M: returns the current BASEPRI and raises the masking priority
 * to 0x80 through BASEPRI_MAX, which only writes BASEPRI when this
 * increases the masking level.
 *
 * Other variants: no masking implemented here, 0 is returned.
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /* Read CPSR into level, OR in the I bit via a scratch register, and
     write the result back */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  level = 0;
#endif

  return level;
}
312
/*
 * Restores the interrupt state previously returned by
 * arm_interrupt_disable().
 *
 * ARMv4 writes the complete CPSR, so level must be a full CPSR value
 * obtained from arm_interrupt_disable(), not an arbitrary mask.
 * ARMv7-M simply restores BASEPRI.  Other variants are a no-op.
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
333
/*
 * Briefly opens an interrupt window inside a critical section: saves the
 * current (disabled) CPSR/BASEPRI, restores the pre-disable state given
 * by level, then writes the saved value back so interrupts are disabled
 * again on return.
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
360
361#define _CPU_ISR_Disable( _isr_cookie ) \
362  do { \
363    _isr_cookie = arm_interrupt_disable(); \
364  } while (0)
365
366#define _CPU_ISR_Enable( _isr_cookie )  \
367  arm_interrupt_enable( _isr_cookie )
368
369#define _CPU_ISR_Flash( _isr_cookie ) \
370  arm_interrupt_flash( _isr_cookie )
371
372void _CPU_ISR_Set_level( uint32_t level );
373
374uint32_t _CPU_ISR_Get_level( void );
375
376void _CPU_Context_Initialize(
377  Context_Control *the_context,
378  void *stack_area_begin,
379  size_t stack_area_size,
380  uint32_t new_level,
381  void (*entry_point)( void ),
382  bool is_fp
383);
384
385#define _CPU_Context_Get_SP( _context ) \
386  (_context)->register_sp
387
388#define _CPU_Context_Restart_self( _the_context ) \
389   _CPU_Context_restore( (_the_context) );
390
391#define _CPU_Context_Fp_start( _base, _offset ) \
392   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
393
394#define _CPU_Context_Initialize_fp( _destination ) \
395  do { \
396    *(*(_destination)) = _CPU_Null_fp_context; \
397  } while (0)
398
399#define _CPU_Fatal_halt( _err )             \
400   do {                                     \
401     uint32_t _level;                       \
402     uint32_t _error = _err;                \
403     _CPU_ISR_Disable( _level );            \
404     __asm__ volatile ("mov r0, %0\n"           \
405                   : "=r" (_error)          \
406                   : "0" (_error)           \
407                   : "r0" );                \
408     while (1);                             \
409   } while (0);
410
/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

/* Installs new_handler for the exception vector and returns the
   previously installed handler through old_handler */
void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 * @brief CPU switch context.
 *
 * Saves the register context of @a run and restores the context of
 * @a heir.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

/* Restores new_context and never returns */
void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

#if defined(ARM_MULTILIB_ARCH_V7M)
  /* ARMv7-M provides dedicated multitasking start/stop entry points */
  void _ARMV7M_Start_multitasking( Context_Control *bsp, Context_Control *heir );
  void _ARMV7M_Stop_multitasking( Context_Control *bsp )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
  #define _CPU_Stop_multitasking _ARMV7M_Stop_multitasking
#endif

/* NOTE(review): presumably fills the volatile (caller-saved) registers
   with values derived from pattern for context validation tests —
   confirm against the implementation */
void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );
441
#ifdef RTEMS_SMP
  /* Starting the first task on a secondary processor is a plain context
     restore on ARM */
  #define _CPU_Context_switch_to_first_task_smp( _context ) \
    _CPU_Context_restore( _context )

  /* DMB: data memory barrier, orders memory accesses around it */
  static inline void _ARM_Data_memory_barrier( void )
  {
    __asm__ volatile ( "dmb" : : : "memory" );
  }

  /* DSB: data synchronization barrier, completes outstanding memory
     accesses before execution continues */
  static inline void _ARM_Data_synchronization_barrier( void )
  {
    __asm__ volatile ( "dsb" : : : "memory" );
  }

  /* SEV: signals an event to the other processors */
  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  /* WFE: waits until an event is signalled */
  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  /* Makes prior stores visible (DSB) before waking waiting processors */
  static inline void _CPU_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }

  /* Waits for an event, then orders subsequent loads after the wake-up */
  static inline void _CPU_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
#endif
478
479
/*
 * Returns the 32-bit value with the order of its four bytes reversed
 * (endianness conversion).
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 provides the REV byte-reverse instruction */
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  /* Thumb-1: extract the four bytes in C and reassemble them reversed */
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  /* ARM state: classic four-instruction byte reverse using EOR/BIC with
     rotated operands */
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
510
/*
 * Returns the 16-bit value with its two bytes exchanged (endianness
 * conversion).
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 provides REV16, which swaps the bytes of each half-word */
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  /* Portable fallback: isolate both bytes and recombine them swapped */
  uint16_t lower = (uint16_t) ( value & 0xffU );
  uint16_t upper = (uint16_t) ( ( value >> 8 ) & 0xffU );

  return (uint16_t) ( ( lower << 8 ) | upper );
#endif
}
524
/* Idle thread body; only provided when the target supports WFI (see
   CPU_PROVIDES_IDLE_THREAD_BODY above).  The parameter is unused. */
#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif
528
529/** @} */
530
531/**
532 * @addtogroup ScoreCPUARM
533 */
534/**@{**/
535
#if defined(ARM_MULTILIB_ARCH_V4)

/*
 * Symbolic names of the ARMv4 exception vectors, numbered in vector
 * table order.
 */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  /* Forces the enumeration type to be at least 32 bits wide */
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;

#endif /* defined(ARM_MULTILIB_ARCH_V4) */
552
/*
 * Complete VFP register file: the FPEXC and FPSCR control registers and
 * the 32 double-precision registers d0-d31.
 *
 * NOTE(review): sizeof(ARM_VFP_context) must equal ARM_VFP_CONTEXT_SIZE
 * (2 * 4 + 32 * 8 = 264 bytes) defined above — keep layout and macro in
 * sync.
 */
typedef struct {
  /* Floating point exception control register */
  uint32_t register_fpexc;
  /* Floating point status and control register */
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;
589
/*
 * Register state captured when an exception is taken.
 *
 * NOTE(review): The layout is a binary contract with the macros above:
 * ARM_EXCEPTION_FRAME_SIZE (76), ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET
 * (52 = 13 * 4, i.e. register_sp follows r0-r12) and
 * ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET (72).  Do not reorder fields
 * without updating those macros.
 */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  /* Vector which caused this exception */
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  /* VFP state at the time of the exception — NOTE(review): presumably
     NULL when no VFP context is available; confirm in the exception
     handler code */
  const ARM_VFP_context *vfp_context;
} CPU_Exception_frame;

/* Interrupt frames share the exception frame layout on this port */
typedef CPU_Exception_frame CPU_Interrupt_frame;

/* Prints the frame contents, e.g. as part of fatal error reporting */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/* Handler invoked for otherwise unhandled exceptions — NOTE(review):
   inferred from the name; confirm where it is installed */
void _ARM_Exception_default( CPU_Exception_frame *frame );
622
623/** @} */
624
625#ifdef __cplusplus
626}
627#endif
628
629#endif /* ASM */
630
631#endif /* _RTEMS_SCORE_CPU_H */
Note: See TracBrowser for help on using the repository browser.