source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ c5ed148

Last change on this file since c5ed148 was c5ed148, checked in by Sebastian Huber <sebastian.huber@…>, on 09/24/11 at 12:56:51

2011-09-24 Sebastian Huber <sebastian.huber@…>

  • rtems/score/armv7m.h, armv7m-context-initialize.c, armv7m-context-restore.c, armv7m-context-switch.c, armv7m-exception-handler-get.c, armv7m-exception-handler-set.c, armv7m-exception-priority-get.c, armv7m-exception-priority-set.c, armv7m-initialize.c, armv7m-isr-dispatch.c, armv7m-isr-enter-leave.c, armv7m-isr-level-get.c, armv7m-isr-level-set.c, armv7m-isr-vector-install.c, armv7m-multitasking-start-stop.c: New files.
  • Makefile.am, preinstall.am: Reflect changes above.
  • rtems/score/arm.h: Define ARM_MULTILIB_ARCH_V4 and ARM_MULTILIB_ARCH_V7M.
  • rtems/score/cpu.h, cpu_asm.S, cpu.c, arm_exc_abort.S, arm_exc_handler_high.c, arm_exc_handler_low.S, arm_exc_interrupt.S: Define CPU_HAS_HARDWARE_INTERRUPT_STACK to FALSE. Use ARM_MULTILIB_ARCH_V4 and ARM_MULTILIB_ARCH_V7M.
  • Property mode set to 100644
File size: 13.0 KB
/**
 * @file
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM architecture support API.
 */

/*
 * $Id$
 *
 *  This include file contains information pertaining to the ARM
 *  processor.
 *
 *  Copyright (c) 2009-2011 embedded brains GmbH.
 *
 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 *  Copyright (c) 2006 OAR Corporation
 *
 *  Copyright (c) 2002 Advent Networks, Inc.
 *        Jay Monkman <jmonkman@adventnetworks.com>
 *
 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/arm.h>

#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @defgroup ScoreCPUARM ARM Specific Support
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM specific support.
 *
 * @{
 */

#ifdef __thumb__
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif

/**
 * @name Program Status Register
 *
 * @{
 */

#define ARM_PSR_N (1 << 31)
#define ARM_PSR_Z (1 << 30)
#define ARM_PSR_C (1 << 29)
#define ARM_PSR_V (1 << 28)
#define ARM_PSR_Q (1 << 27)
#define ARM_PSR_J (1 << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1 << 9)
#define ARM_PSR_A (1 << 8)
#define ARM_PSR_I (1 << 7)
#define ARM_PSR_F (1 << 6)
#define ARM_PSR_T (1 << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

/** @} */

/** @} */

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

/**
 * @addtogroup ScoreCPU
 *
 * @{
 */

/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif

#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

#if ( ARM_HAS_FPU == 1 )
  #define CPU_HARDWARE_FP TRUE
#else
  #define CPU_HARDWARE_FP FALSE
#endif

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE

#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32? */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non-critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ).  If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x80
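/*
 * Illustrative sketch, not part of the original header: the mask value above
 * is exactly the CPSR I bit defined earlier in this file (ARM_PSR_I ==
 * 1 << 7 == 0x80), matching the comment that only normal interrupts (IRQ)
 * are covered by the interrupt mask.  A hypothetical compile-time sanity
 * check could therefore read:
 */
#if defined(ARM_MULTILIB_ARCH_V4) && (CPU_MODES_INTERRUPT_MASK != ARM_PSR_I)
  #error "CPU_MODES_INTERRUPT_MASK is expected to equal ARM_PSR_I"
#endif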

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8

#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there is a CLZ instruction which could be
 *     used to implement this much more quickly than the default macros.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
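/*
 * Illustrative sketch, not part of the original header: GCC's __builtin_clz()
 * maps to the CLZ instruction on ARM V5 and later, so a port-specific
 * find-first-bit routine could be built on it instead of the generic code
 * selected above.  Hypothetical helper, name invented here:
 */
#ifndef ASM
static inline unsigned int arm_count_leading_zeros_sketch( uint32_t value )
{
  /* Note: __builtin_clz() is undefined for value == 0 */
  return (unsigned int) __builtin_clz( value );
}
#endif /* ASM */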

/** @} */

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreCPU
 *
 * @{
 */

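/*
 * The structure below holds the per-thread registers preserved by
 * _CPU_Context_switch().  On ARMv4 this is the CPSR plus the callee-saved
 * registers r4-r10, fp, sp and lr, together with the resume address in pc;
 * on ARMv7-M it is r4-r11, lr, the stack pointer and the interrupt nest
 * level maintained by the ARMv7-M support code.
 */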
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
  uint32_t register_pc;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#endif
} Context_Control;

typedef struct {
  /* Not supported */
} Context_Control_fp;

SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context;

extern uint32_t arm_cpu_mode;

static inline uint32_t arm_interrupt_disable( void )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;
  uint32_t level;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );

  return level;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t level;
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );

  return level;
#endif
}

static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}

static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )
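/*
 * Illustrative sketch, not part of the original header: the cookie written by
 * _CPU_ISR_Disable() must be handed back unchanged.  A hypothetical critical
 * section built from these macros (function name invented here):
 */
static inline void arm_isr_critical_section_sketch( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );
  /* ... first part of the critical section, IRQs masked ... */
  _CPU_ISR_Flash( level );
  /* _CPU_ISR_Flash() restores the previous level for a moment so pending
   * interrupts can be serviced, then masks IRQs again. */
  /* ... second part of the critical section ... */
  _CPU_ISR_Enable( level );
}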

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

#define _CPU_Fatal_halt( _err )             \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     __asm__ volatile ("mov r0, %0\n"           \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0)

void _CPU_Initialize( void );

#define _CPU_Initialize_vectors()

void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

#if defined(ARM_MULTILIB_ARCH_V7M)
  void _ARMV7M_Start_multitasking( Context_Control *bsp, Context_Control *heir );
  void _ARMV7M_Stop_multitasking( Context_Control *bsp )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
  #define _CPU_Stop_multitasking _ARMV7M_Stop_multitasking
#endif

void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr );

void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr );

static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}
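/*
 * Illustrative sketch, not part of the original header: the swap routines
 * above reverse the byte order unconditionally, so converting a big-endian
 * wire value only needs a swap on little-endian builds.  Hypothetical helper
 * (name invented here):
 */
static inline uint32_t arm_be32_to_cpu_sketch( uint32_t big_endian_value )
{
#if defined(__ARMEL__)
  return CPU_swap_u32( big_endian_value );
#else
  return big_endian_value;
#endif
}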

/** @} */

#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @addtogroup ScoreCPUARM
 *
 * @{
 */

typedef struct {
  uint32_t r0;
  uint32_t r1;
  uint32_t r2;
  uint32_t r3;
  uint32_t r4;
  uint32_t r5;
  uint32_t r6;
  uint32_t r7;
  uint32_t r8;
  uint32_t r9;
  uint32_t r10;
  uint32_t r11;
  uint32_t r12;
  uint32_t sp;
  uint32_t lr;
  uint32_t pc;
  uint32_t cpsr;
} arm_cpu_context;

typedef void arm_exc_abort_handler( arm_cpu_context *context );

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8
} Arm_symbolic_exception_name;

static inline uint32_t arm_status_irq_enable( void )
{
  uint32_t arm_switch_reg;
  uint32_t psr;

  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[psr], cpsr\n"
    "bic %[arm_switch_reg], %[psr], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [psr] "=&r" (psr)
  );

  return psr;
}

static inline void arm_status_restore( uint32_t psr )
{
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[psr]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [psr] "r" (psr)
  );

  RTEMS_COMPILER_MEMORY_BARRIER();
}

void arm_exc_data_abort_set_handler( arm_exc_abort_handler handler );

void arm_exc_data_abort( void );

void arm_exc_prefetch_abort_set_handler( arm_exc_abort_handler handler );

void arm_exc_prefetch_abort( void );

void bsp_interrupt_dispatch( void );

void arm_exc_interrupt( void );

void arm_exc_undefined( void );

/** @} */

/* XXX This is out of date */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_ip;
  uint32_t register_lr;
} CPU_Exception_frame;

typedef CPU_Exception_frame CPU_Interrupt_frame;

#elif defined(ARM_MULTILIB_ARCH_V7M)

typedef void CPU_Interrupt_frame;

#endif /* defined(ARM_MULTILIB_ARCH_V7M) */

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */