source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ d9bd5cd6

4.11
Last change on this file since d9bd5cd6 was d9bd5cd6, checked in by Sebastian Huber <sebastian.huber@…>, on May 22, 2013 at 7:54:34 AM

arm: Add CPU specific idle thread for ARMv7

  • Property mode set to 100644
File size: 13.8 KB
Line 
1/**
2 * @file
3 *
4 * @brief ARM Architecture Support API
5 */
6
7/*
8 *  This include file contains information pertaining to the ARM
9 *  processor.
10 *
11 *  Copyright (c) 2009-2013 embedded brains GmbH.
12 *
13 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
14 *
15 *  Copyright (c) 2006 OAR Corporation
16 *
17 *  Copyright (c) 2002 Advent Networks, Inc.
18 *        Jay Monkman <jmonkman@adventnetworks.com>
19 *
20 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
21 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
22 *
23 *  The license and distribution terms for this file may be
24 *  found in the file LICENSE in this distribution or at
25 *  http://www.rtems.com/license/LICENSE.
26 *
27 */
28
29#ifndef _RTEMS_SCORE_CPU_H
30#define _RTEMS_SCORE_CPU_H
31
32#include <rtems/score/types.h>
33#include <rtems/score/arm.h>
34
#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @defgroup ScoreCPUARM ARM Specific Support
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM specific support.
 */
/**@{**/

/*
 * Helper fragments to temporarily switch from Thumb to ARM state inside the
 * inline assembly blocks below.  They are only non-empty on Thumb-1
 * (__thumb__ without __thumb2__), where the mrs/msr instructions used by
 * this file are not encodable; everywhere else they expand to nothing.
 */
#if defined(__thumb__) && !defined(__thumb2__)
  /* Scratch register needed to branch back into Thumb state */
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  /* Align, then branch-exchange via pc to enter ARM state */
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  /* Set bit 0 (Thumb bit) of the return address and branch back */
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
59
/**
 * @name Program Status Register
 *
 * Bit and field definitions for the ARM program status registers
 * (CPSR/SPSR).
 */
/**@{**/

/*
 * Unsigned literals are used for the flag bits: (1 << 31) would shift into
 * the sign bit of a signed int, which is undefined behavior in C.  The bit
 * patterns are unchanged.
 */
#define ARM_PSR_N (1U << 31) /* Negative condition flag */
#define ARM_PSR_Z (1U << 30) /* Zero condition flag */
#define ARM_PSR_C (1U << 29) /* Carry condition flag */
#define ARM_PSR_V (1U << 28) /* Overflow condition flag */
#define ARM_PSR_Q (1U << 27) /* Saturation flag */
#define ARM_PSR_J (1U << 24) /* Jazelle state bit */
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xfU << ARM_PSR_GE_SHIFT) /* SIMD greater-or-equal flags */
#define ARM_PSR_E (1U << 9) /* Endianness state bit */
#define ARM_PSR_A (1U << 8) /* Asynchronous abort mask */
#define ARM_PSR_I (1U << 7) /* IRQ mask */
#define ARM_PSR_F (1U << 6) /* FIQ mask */
#define ARM_PSR_T (1U << 5) /* Thumb state bit */
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1fU << ARM_PSR_M_SHIFT) /* Processor mode field */
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

/** @} */

/** @} */
91
92#endif /* defined(ARM_MULTILIB_ARCH_V4) */
93
/**
 * @addtogroup ScoreCPU
 */
/**@{**/

/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

/* Endianness is derived from the compiler's predefined macros */
#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif

#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/* No dedicated interrupt stack is managed by this port */
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

/* No floating point context switching, see Context_Control_fp below */
#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

/* A CPU specific idle body is provided only if the CPU can wait for
 * interrupts (WFI) */
#if defined(ARM_MULTILIB_HAS_WFI)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32? */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE
153
/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ). If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x80

/* Size of the (empty, unsupported) floating point context */
#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/* The eight ARM exception vectors, see Arm_symbolic_exception_name below */
#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8

#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* Minimum task stack size in bytes */
#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there's a CLZ function which could be
 *     used to implement much quicker than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

/** @} */
212
#ifdef ARM_MULTILIB_VFP_D32
  /* Byte offset of register_d8 in Context_Control: eleven 32-bit members
   * (44 bytes) padded up to the 8-byte alignment of uint64_t.
   * NOTE(review): presumably mirrored by assembly code outside this file --
   * confirm there before changing Context_Control. */
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

/* Size and member offsets of CPU_Exception_frame below: r0-r12 occupy the
 * first 52 bytes, so sp is at 52 and the VFP context pointer at 72; total
 * size is 76 bytes. */
#define ARM_EXCEPTION_FRAME_SIZE 76

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

/* sizeof(ARM_VFP_context): 2 * 4 bytes (FPEXC, FPSCR) + 32 * 8 bytes (d0-d31) */
#define ARM_VFP_CONTEXT_SIZE 264
224
225#ifndef ASM
226
227#ifdef __cplusplus
228extern "C" {
229#endif
230
/**
 * @addtogroup ScoreCPU
 */
/**@{**/

/**
 * @brief Per-thread register context used by the context switch routines
 * declared below (_CPU_Context_switch() / _CPU_Context_restore()).
 *
 * It holds the callee-saved integer registers of the respective
 * architecture variant plus the stack pointer; on ARMv4 the CPSR is saved
 * as well, on ARMv7-M an interrupt nesting level is tracked.  With VFP-D32
 * multilibs the AAPCS callee-saved VFP registers d8-d15 are appended; their
 * start must match ARM_CONTEXT_CONTROL_D8_OFFSET above.
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  /* Fallback variant: only the stack pointer is tracked */
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_VFP_D32
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
} Context_Control;
275
/**
 * @brief Floating point context.
 *
 * Deliberately empty: this port does not support floating point context
 * switching (see CPU_HARDWARE_FP / CPU_SOFTWARE_FP above).
 */
typedef struct {
  /* Not supported */
} Context_Control_fp;

/* CPU mode used by the port.  NOTE(review): defined outside this header;
 * presumably one of the ARM_PSR_M_* values -- confirm at the definition. */
extern uint32_t arm_cpu_mode;
281
/**
 * @brief Disables normal interrupts (IRQ) and returns the previous level.
 *
 * ARMv4: reads the CPSR, sets the IRQ disable bit (0x80, matching
 * CPU_MODES_INTERRUPT_MASK) and returns the original CPSR value.
 * ARMv7-M: raises BASEPRI to 0x80 via the BASEPRI_MAX register alias
 * (which only ever increases the masking level) and returns the previous
 * BASEPRI value.  Other configurations: no-op, returns 0.
 *
 * The returned value is the cookie expected by arm_interrupt_enable() and
 * arm_interrupt_flash().
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  level = 0;
#endif

  return level;
}
312
/**
 * @brief Restores the interrupt level previously returned by
 * arm_interrupt_disable().
 *
 * ARMv4: writes @a level back to the CPSR.  ARMv7-M: writes @a level back
 * to BASEPRI.  Other configurations: no-op.
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
333
/**
 * @brief Briefly opens an interrupt window.
 *
 * Saves the current state (CPSR on ARMv4, BASEPRI on ARMv7-M), installs
 * @a level -- normally the state before the matching
 * arm_interrupt_disable() -- and then restores the saved state again.
 * Other configurations: no-op.
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
360
/* Generic RTEMS ISR primitives, mapped onto the inline helpers above */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

/* Sets the current interrupt level (implemented outside this header) */
void _CPU_ISR_Set_level( uint32_t level );

/* Returns the current interrupt level */
uint32_t _CPU_ISR_Get_level( void );
375
/**
 * @brief Initializes @a the_context for a new thread.
 *
 * Implemented outside this header.  @a is_fp is part of the generic port
 * interface; this port has no floating point context (see
 * Context_Control_fp).
 */
void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp
);

/* Stack pointer stored in a saved context */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

/* Restarting self is simply restoring the own context */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/* Address of an FP context area inside a larger memory block */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Copies the null FP context into *(*_destination).  NOTE(review): the
 * double indirection suggests _destination is a Context_Control_fp **, and
 * _CPU_Null_fp_context is declared elsewhere -- confirm at the call sites. */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)
398
/**
 * @brief Halts the system with the given error code.
 *
 * Disables interrupts, moves the error code into r0 (so it can be inspected
 * with a debugger), and spins forever.
 *
 * The stray semicolon after the do/while(0) wrapper was removed: it
 * produced an empty extra statement at every use and made the macro unsafe
 * in if/else constructs, defeating the purpose of the do/while(0) idiom.
 */
#define _CPU_Fatal_halt( _err )             \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0)
410
/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

/* Installs @a new_handler for @a vector and returns the previous handler
 * through @a old_handler */
void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 * @brief CPU switch context.
 *
 * Switches from the context of @a run to the context of @a heir.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

/* Resumes @a new_context without saving the current context; never returns */
void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

#if defined(ARM_MULTILIB_ARCH_V7M)
  /* ARMv7-M provides its own start/stop of multitasking */
  void _ARMV7M_Start_multitasking( Context_Control *bsp, Context_Control *heir );
  void _ARMV7M_Stop_multitasking( Context_Control *bsp )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
  #define _CPU_Stop_multitasking _ARMV7M_Stop_multitasking
#endif

/* Context validation support, fills volatile registers with @a pattern */
void _CPU_Context_volatile_clobber( uintptr_t pattern );

void _CPU_Context_validate( uintptr_t pattern );
441
/**
 * @brief Returns @a value with its four bytes reversed (endian swap).
 *
 * Thumb-2 uses the REV instruction, Thumb-1 uses portable C shifts (REV is
 * not available), and ARM state uses the classic EOR/BIC/ROR byte swap
 * sequence.
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
472
/**
 * @brief Returns @a value with its two bytes exchanged (endian swap).
 *
 * Thumb-2 uses the REV16 instruction; everywhere else a portable C shift
 * expression is used.
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  uint16_t lower = value & 0xffU;
  uint16_t upper = (value >> 8) & 0xffU;

  return (uint16_t) ((lower << 8) | upper);
#endif
}
486
#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  /* CPU specific idle loop, only provided when the CPU has WFI (see
   * ARM_MULTILIB_HAS_WFI above) */
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif

/** @} */

/**
 * @addtogroup ScoreCPUARM
 */
/**@{**/

#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @brief Symbolic names for the eight ARM exception vectors.
 */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  /* Forces the enum to be at least 32 bits wide */
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;

#endif /* defined(ARM_MULTILIB_ARCH_V4) */
514
/**
 * @brief Complete VFP register set: FPEXC, FPSCR and the 32 double
 * precision registers d0-d31.
 *
 * Size is 2 * 4 + 32 * 8 = 264 bytes and must match
 * ARM_VFP_CONTEXT_SIZE above.
 */
typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;
551
/**
 * @brief Registers captured when an exception is taken.
 *
 * The layout must stay in sync with ARM_EXCEPTION_FRAME_SIZE and the
 * ARM_EXCEPTION_FRAME_*_OFFSET defines above: r0-r12 occupy the first 52
 * bytes, so register_sp is at offset 52 and vfp_context at offset 72.
 */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  /* NOTE(review): presumably NULL when no VFP context was saved -- confirm
   * in the exception handling code */
  const ARM_VFP_context *vfp_context;
} CPU_Exception_frame;

/* Interrupt frames share the exception frame layout */
typedef CPU_Exception_frame CPU_Interrupt_frame;

/* Prints the exception frame contents */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/* Default handler for otherwise unhandled exceptions */
void _ARM_Exception_default( CPU_Exception_frame *frame );
584
585/** @} */
586
587#ifdef __cplusplus
588}
589#endif
590
591#endif /* ASM */
592
593#endif /* _RTEMS_SCORE_CPU_H */
Note: See TracBrowser for help on using the repository browser.