source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ 03b7789

4.11
Last change on this file since 03b7789 was 03b7789, checked in by Sebastian Huber <sebastian.huber@…>, on Apr 26, 2014 at 1:09:10 PM

score: Statically initialize _ISR_Vector_table

  • Property mode set to 100644
File size: 15.3 KB
Line 
1/**
2 * @file
3 *
4 * @brief ARM Architecture Support API
5 */
6
7/*
8 *  This include file contains information pertaining to the ARM
9 *  processor.
10 *
11 *  Copyright (c) 2009-2013 embedded brains GmbH.
12 *
13 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
14 *
15 *  Copyright (c) 2006 OAR Corporation
16 *
17 *  Copyright (c) 2002 Advent Networks, Inc.
18 *        Jay Monkman <jmonkman@adventnetworks.com>
19 *
20 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
21 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
22 *
23 *  The license and distribution terms for this file may be
24 *  found in the file LICENSE in this distribution or at
25 *  http://www.rtems.org/license/LICENSE.
26 *
27 */
28
29#ifndef _RTEMS_SCORE_CPU_H
30#define _RTEMS_SCORE_CPU_H
31
32#include <rtems/score/types.h>
33#include <rtems/score/arm.h>
34
35#if defined(ARM_MULTILIB_ARCH_V4)
36
37/**
38 * @defgroup ScoreCPUARM ARM Specific Support
39 *
40 * @ingroup ScoreCPU
41 *
42 * @brief ARM specific support.
43 */
44/**@{**/
45
/*
 * Thumb-1 only: helpers to temporarily switch the processor to ARM state
 * inside inline assembly (Thumb-1 lacks the MRS/MSR instructions used by
 * the interrupt support below) and to switch back to Thumb afterwards.
 */
#if defined(__thumb__) && !defined(__thumb2__)
  /* Scratch register declaration used by ARM_SWITCH_BACK */
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  /* ".align 2" guarantees the 4-byte alignment needed before "bx pc" enters ARM state */
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  /* "pc + 1" sets bit 0 of the target address so "bx" returns to Thumb state */
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  /* ARM or Thumb-2 state: no switch is necessary, all helpers are empty */
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
59
60/**
61 * @name Program Status Register
62 */
63/**@{**/
64
/*
 * Program status register (CPSR) bit and field definitions.
 *
 * Plain hexadecimal constants are used for the single-bit flags instead
 * of "1 << n" shifts: the previous "(1 << 31)" for the N bit shifted a
 * signed int into its sign bit, which is undefined behavior in C.  The
 * hex form is well defined (0x80000000 has type unsigned int) and also
 * remains usable from assembly language sources, which cannot parse C
 * integer suffixes.
 */
#define ARM_PSR_N 0x80000000
#define ARM_PSR_Z 0x40000000
#define ARM_PSR_C 0x20000000
#define ARM_PSR_V 0x10000000
#define ARM_PSR_Q 0x08000000
#define ARM_PSR_J 0x01000000
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E 0x200
#define ARM_PSR_A 0x100
#define ARM_PSR_I 0x80
#define ARM_PSR_F 0x40
#define ARM_PSR_T 0x20
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f
87
88/** @} */
89
90/** @} */
91
92#endif /* defined(ARM_MULTILIB_ARCH_V4) */
93
94/**
95 * @addtogroup ScoreCPU
96 */
97/**@{**/
98
/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

/* Endianness follows the compiler's target configuration macros */
#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif
115
#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

/* This port does not use the generic floating point context support */
#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

/* A custom idle thread body is only provided if the CPU supports WFI */
#if defined(ARM_MULTILIB_HAS_WFI)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32? */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ). If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* Minimum task stack size: 4KiB */
#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there's a CLZ function which could be
 *     used to implement much quicker than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

/* Matches the empty CPU_Per_CPU_control structure defined below */
#define CPU_PER_CPU_CONTROL_SIZE 0
208
209/** @} */
210
/*
 * Byte offsets and sizes matching the C structures defined below,
 * presumably consumed by the assembly language support code (outside
 * the #ifndef ASM guard).  On ARMv4 the first eleven 32-bit members of
 * Context_Control (cpsr, r4-r10, fp, sp, lr) occupy bytes 0..43, so
 * thread_id lands at offset 44 and register_d8 at offset 48.
 */
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
#endif

#ifdef ARM_MULTILIB_VFP_D32
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#define ARM_EXCEPTION_FRAME_SIZE 76

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

/* sizeof(ARM_VFP_context): 2 * 4 byte status words + 32 * 8 byte D registers */
#define ARM_VFP_CONTEXT_SIZE 264
226
227#ifndef ASM
228
229#ifdef __cplusplus
230extern "C" {
231#endif
232
233/**
234 * @addtogroup ScoreCPU
235 */
236/**@{**/
237
/*
 * Intentionally empty: matches CPU_PER_CPU_CONTROL_SIZE == 0 above.
 * NOTE(review): a struct with no members is a GNU C extension, not ISO C.
 */
typedef struct {
  /* There is no CPU specific per-CPU state */
} CPU_Per_CPU_control;
241
/**
 * @brief Thread register context saved and restored by the context
 * switch code.
 *
 * The member layout must stay in sync with the ARM_CONTEXT_CONTROL_*
 * offsets defined above.
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  /* ARMv4: status register plus the registers preserved across calls */
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  /* ARMv7-M variant, including the interrupt nesting level */
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  /* Fallback: only the stack pointer */
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  /* Per-thread ID value (offset 44, see ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET) */
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP_D32
  /* VFP registers D8-D15 (offset 48, see ARM_CONTEXT_CONTROL_D8_OFFSET) */
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
} Context_Control;
284
/* Placeholder: the generic FP context is unused (CPU_HARDWARE_FP is FALSE) */
typedef struct {
  /* Not supported */
} Context_Control_fp;
288
/* NOTE(review): presumably the CPSR mode value used for new thread
 * contexts -- confirm against the port's cpu.c. */
extern uint32_t arm_cpu_mode;

/** @brief Data Memory Barrier (DMB instruction). */
static inline void _ARM_Data_memory_barrier( void )
{
  __asm__ volatile ( "dmb" : : : "memory" );
}

/** @brief Data Synchronization Barrier (DSB instruction). */
static inline void _ARM_Data_synchronization_barrier( void )
{
  __asm__ volatile ( "dsb" : : : "memory" );
}

/** @brief Instruction Synchronization Barrier (ISB instruction). */
static inline void _ARM_Instruction_synchronization_barrier( void )
{
  __asm__ volatile ( "isb" : : : "memory" );
}
305
/**
 * @brief Disables normal interrupts (IRQ) and returns the previous
 * interrupt level, suitable for a later arm_interrupt_enable().
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /* Read the CPSR, then write it back with the I bit (0x80) set */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  /* basepri_max only raises the priority mask, never lowers it */
  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  level = 0;
#endif

  return level;
}
336
/**
 * @brief Restores the interrupt level previously returned by
 * arm_interrupt_disable().
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  /* Write the saved value back to the CPSR */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  /* Restore the saved base priority mask */
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
357
/**
 * @brief Briefly restores the interrupt level @a level and then disables
 * interrupts again, giving pending interrupts a window to be serviced.
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /* Save current CPSR, momentarily apply the caller's level, then restore */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  /* Save current basepri, momentarily apply the caller's level, then restore */
  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
384
/* Generic ISR disable/enable/flash macros mapped to the inline helpers above */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

/** @brief Sets the interrupt level of the executing thread. */
void _CPU_ISR_Set_level( uint32_t level );

/** @brief Returns the interrupt level of the executing thread. */
uint32_t _CPU_ISR_Get_level( void );

/**
 * @brief Initializes the thread context for the first start of a thread.
 *
 * @param[out] the_context context to initialize
 * @param[in] stack_area_begin begin of the thread stack area
 * @param[in] stack_area_size size of the thread stack area in bytes
 * @param[in] new_level initial interrupt level
 * @param[in] entry_point thread entry function
 * @param[in] is_fp true if the thread uses the floating point unit
 * @param[in] tls_area begin of the thread-local storage area
 */
void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

/* Returns the stack pointer stored in a thread context */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

/*
 * Halts the system: disables interrupts, moves the error code to r0 and
 * spins forever.
 * NOTE(review): the asm constraints look fragile -- "_error" is declared
 * as an output ("=r") although it is only read, and "r0" is listed as a
 * clobber even though %0 itself may be allocated to r0; confirm against
 * the GCC extended asm rules.
 */
#define _CPU_Fatal_halt( _err )             \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     (void) _level;                         \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0);
436
/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

/**
 * @brief Installs @a new_handler for @a vector and returns the previous
 * handler via @a old_handler.
 */
void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 * @brief CPU switch context.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

/** @brief Restores the context of @a new_context; does not return. */
void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/* ARMv7-M uses a dedicated multitasking start routine */
#if defined(ARM_MULTILIB_ARCH_V7M)
  void _ARMV7M_Start_multitasking( Context_Control *heir );
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif

/** @brief Fills the volatile registers with @a pattern. */
void _CPU_Context_volatile_clobber( uintptr_t pattern );

/** @brief Validates the register context against @a pattern. */
void _CPU_Context_validate( uintptr_t pattern );
464
/* SMP support: only compiled for SMP-enabled configurations */
#ifdef RTEMS_SMP
  /** @brief Performs CPU-specific SMP initialization; returns the processor count. */
  uint32_t _CPU_SMP_Initialize( void );

  /** @brief Starts the processor with index @a cpu_index. */
  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  /** @brief Finalizes the SMP initialization for @a cpu_count processors. */
  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  /**
   * @brief Returns the index of the current processor, read from the
   * low byte of the MPIDR.
   */
  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    /* Affinity level 0 is in bits [7:0] */
    return mpidr & 0xffU;
  }

  /** @brief Sends an inter-processor interrupt to the target processor. */
  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  /** @brief Send Event (SEV instruction): wakes processors waiting in WFE. */
  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  /** @brief Wait For Event (WFE instruction). */
  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  /* Ensure prior stores are visible (DSB) before signalling the event */
  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }

  /* Wait for the event, then order subsequent loads with a DMB */
  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
#endif
509
510
/**
 * @brief Returns @a value with its four bytes reversed (endian swap).
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 has the REV instruction for 32-bit byte reversal */
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  /* Thumb-1: plain C fallback, byte-by-byte */
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  /* Classic four-instruction ARM byte swap using one scratch register */
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
541
/**
 * @brief Returns @a value with its two bytes exchanged (endian swap).
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 has the REV16 instruction for 16-bit byte reversal */
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  /* Generic variant: exchange the low and the high byte with shifts */
  uint16_t low_byte = (uint16_t) (value & 0xffU);
  uint16_t high_byte = (uint16_t) ((value >> 8) & 0xffU);

  return (uint16_t) (((uint32_t) low_byte << 8) | high_byte);
#endif
}
555
/** @brief Unsigned integer type used for CPU counter values. */
typedef uint32_t CPU_Counter_ticks;

/** @brief Returns the current CPU counter value. */
CPU_Counter_ticks _CPU_Counter_read( void );

/** @brief Returns the difference between the @a second and @a first counter values. */
CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
);

/* The idle thread body exists only when WFI is available, see above */
#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif
568
569/** @} */
570
571/**
572 * @addtogroup ScoreCPUARM
573 */
574/**@{**/
575
576#if defined(ARM_MULTILIB_ARCH_V4)
577
/**
 * @brief Symbolic names for the ARM exception vectors, in ARMv4 vector
 * table order.
 */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  /* Forces the enum representation to be at least 32 bits wide */
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;
590
591#endif /* defined(ARM_MULTILIB_ARCH_V4) */
592
/**
 * @brief Complete VFP register context: FPEXC, FPSCR and D0-D31.
 *
 * Its size must equal ARM_VFP_CONTEXT_SIZE (264 bytes) defined above.
 */
typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;
629
/**
 * @brief Register frame captured when an exception occurs.
 *
 * The layout must stay in sync with the ARM_EXCEPTION_FRAME_* offsets
 * defined above (size 76 bytes, sp at offset 52, vfp_context at 72).
 */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  /* NULL if no VFP context was saved for this exception */
  const ARM_VFP_context *vfp_context;
} CPU_Exception_frame;
656
/* The interrupt frame uses the same layout as the exception frame */
typedef CPU_Exception_frame CPU_Interrupt_frame;

/** @brief Prints the exception frame @a frame. */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/** @brief Default exception handler. */
void _ARM_Exception_default( CPU_Exception_frame *frame );
662
663/** @} */
664
665#ifdef __cplusplus
666}
667#endif
668
669#endif /* ASM */
670
671#endif /* _RTEMS_SCORE_CPU_H */
Note: See TracBrowser for help on using the repository browser.