source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ aedeb57

Last change on this file since aedeb57 was 2bbea4dd, checked in by Sebastian Huber <sebastian.huber@…>, on 05/03/13 at 14:14:45

arm: Switch to ARM only for Thumb-1

The Thumb-2 instruction set has encodings for the relevant instructions.

/**
 * @file
 *
 * @brief ARM Architecture Support API
 */

/*
 *  This include file contains information pertaining to the ARM
 *  processor.
 *
 *  Copyright (c) 2009-2011 embedded brains GmbH.
 *
 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 *  Copyright (c) 2006 OAR Corporation
 *
 *  Copyright (c) 2002 Advent Networks, Inc.
 *        Jay Monkman <jmonkman@adventnetworks.com>
 *
 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/arm.h>

#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @defgroup ScoreCPUARM ARM Specific Support
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM specific support.
 */
/**@{**/

#if defined(__thumb__) && !defined(__thumb2__)
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
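
/*
 * Thumb-1 provides no encodings for instructions such as mrs and msr, so the
 * inline assembly below has to switch to ARM state first.  ARM_SWITCH_TO_ARM
 * word-aligns the code and executes "bx pc", which continues in ARM state at
 * the next word-aligned instruction because the target address has bit 0
 * clear.  ARM_SWITCH_BACK computes pc + 1 in a scratch register and branches
 * to it, so execution resumes in Thumb state right after the "bx".  See
 * arm_interrupt_disable() below for the intended usage.
 */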

/**
 * @name Program Status Register
 */
/**@{**/

#define ARM_PSR_N (1 << 31)
#define ARM_PSR_Z (1 << 30)
#define ARM_PSR_C (1 << 29)
#define ARM_PSR_V (1 << 28)
#define ARM_PSR_Q (1 << 27)
#define ARM_PSR_J (1 << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1 << 9)
#define ARM_PSR_A (1 << 8)
#define ARM_PSR_I (1 << 7)
#define ARM_PSR_F (1 << 6)
#define ARM_PSR_T (1 << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f
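
/*
 * The current processor mode can be tested with the mask and mode values
 * above, for example:
 *
 *   if ( ( cpsr & ARM_PSR_M_MASK ) == ARM_PSR_M_SVC ) {
 *     ... executing in Supervisor mode ...
 *   }
 */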

/** @} */

/** @} */

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

/**
 * @addtogroup ScoreCPU
 */
/**@{**/

/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif

#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

#if ( ARM_HAS_FPU == 1 )
  #define CPU_HARDWARE_FP TRUE
#else
  #define CPU_HARDWARE_FP FALSE
#endif

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#if defined(ARM_MULTILIB_ARCH_V7M)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32? */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ). If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x80
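
/*
 * Note that 0x80 is the value of the CPSR I bit (ARM_PSR_I above); it is the
 * bit set by arm_interrupt_disable() on ARMv4 and masks IRQs but not FIQs.
 */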

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8

#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there is a CLZ instruction which could be
 *     used to implement this much more quickly than the default macro.
 */
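
/*
 * Illustrative sketch only (not part of this header): on ARM V5 and later a
 * CLZ based routine could supply the find-first-bit operation, assuming the
 * score convention that bit 0 denotes the most significant bit, e.g.
 *
 *   static inline unsigned int arm_find_first_bit( uint32_t value )
 *   {
 *     return value != 0 ? (unsigned int) __builtin_clz( value ) : 32;
 *   }
 *
 * The generic implementation is selected instead by the defines below.
 */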

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

/** @} */

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreCPU
 */
/**@{**/

typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
} Context_Control;
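
/*
 * Only the registers that the AAPCS requires the callee to preserve (plus
 * the status register on ARMv4) appear above, since _CPU_Context_switch() is
 * entered through an ordinary function call and the caller-saved registers
 * need not survive it.
 */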

typedef struct {
  /* Not supported */
} Context_Control_fp;

SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context;

extern uint32_t arm_cpu_mode;

static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  level = 0;
#endif

  return level;
}
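
/*
 * On ARMv7-M the write goes through BASEPRI_MAX, which only updates BASEPRI
 * if the new value masks more interrupts than the current one, so nesting
 * this sequence never weakens an already stricter mask.
 */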

static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}

static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )
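
/*
 * Typical usage of the macros above (illustrative only):
 *
 *   uint32_t level;
 *
 *   _CPU_ISR_Disable( level );
 *   ... critical section, optionally with _CPU_ISR_Flash( level ) ...
 *   _CPU_ISR_Enable( level );
 */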

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

#define _CPU_Fatal_halt( _err )             \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     __asm__ volatile ("mov r0, %0\n"           \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0);

/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 * @brief CPU switch context.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

#if defined(ARM_MULTILIB_ARCH_V7M)
  void _ARMV7M_Start_multitasking( Context_Control *bsp, Context_Control *heir );
  void _ARMV7M_Stop_multitasking( Context_Control *bsp )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
  #define _CPU_Stop_multitasking _ARMV7M_Stop_multitasking
#endif

void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr );

void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr );

static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}
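
/*
 * For example, CPU_swap_u32( 0x12345678 ) returns 0x78563412 and
 * CPU_swap_u16( 0x1234 ) returns 0x3412.
 */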

#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif

/** @} */

/**
 * @addtogroup ScoreCPUARM
 */
/**@{**/

#if defined(ARM_MULTILIB_ARCH_V4)

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;
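
/*
 * The enumerators follow the layout of the ARM exception vector table, i.e.
 * each value is the word index of the corresponding vector entry (reset,
 * undefined instruction, SWI, prefetch abort, data abort, reserved, IRQ,
 * FIQ).
 */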

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
} CPU_Exception_frame;

typedef CPU_Exception_frame CPU_Interrupt_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

void _ARM_Exception_default( CPU_Exception_frame *frame );

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */