source: rtems/cpukit/score/cpu/arm/rtems/score/cpu.h @ 31eebc9

4.115
Last change on this file since 31eebc9 was 31eebc9, checked in by Sebastian Huber <sebastian.huber@…>, on 01/04/13 at 11:32:19

arm: Delete unused arm_exc_undefined()

  • Property mode set to 100644
File size: 13.5 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup ScoreCPU
5 *
6 * @brief ARM architecture support API.
7 */
8
9/*
10 *  This include file contains information pertaining to the ARM
11 *  processor.
12 *
13 *  Copyright (c) 2009-2011 embedded brains GmbH.
14 *
15 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
16 *
17 *  Copyright (c) 2006 OAR Corporation
18 *
19 *  Copyright (c) 2002 Advent Networks, Inc.
20 *        Jay Monkman <jmonkman@adventnetworks.com>
21 *
22 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
23 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
24 *
25 *  The license and distribution terms for this file may be
26 *  found in the file LICENSE in this distribution or at
27 *  http://www.rtems.com/license/LICENSE.
28 *
29 */
30
31#ifndef _RTEMS_SCORE_CPU_H
32#define _RTEMS_SCORE_CPU_H
33
34#include <rtems/score/types.h>
35#include <rtems/score/arm.h>
36
37#if defined(ARM_MULTILIB_ARCH_V4)
38
39/**
40 * @defgroup ScoreCPUARM ARM Specific Support
41 *
42 * @ingroup ScoreCPU
43 *
44 * @brief ARM specific support.
45 *
46 * @{
47 */
48
/*
 * Thumb-1 code cannot execute mrs/msr (CPSR access) directly, so inline
 * assembly that manipulates the status register must temporarily switch
 * to ARM state.  These helpers emit the switch prologue/epilogue and
 * declare the scratch register needed for the switch back.  In ARM (or
 * Thumb-2) builds they expand to nothing.
 */
#ifdef __thumb__
  /* Scratch register used to compute the Thumb return address */
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  /* Word-align, then bx pc lands in ARM state on the following .arm code */
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  /* pc + 1 sets bit 0, so bx returns to Thumb state */
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  /* Output operand binding for the scratch register */
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  /* Same binding, with leading comma, for asm statements that already have outputs */
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
62
/**
 * @name Program Status Register
 *
 * Bit masks and mode values for the ARM CPSR/SPSR.
 *
 * The bit masks use unsigned literals: the N flag occupies bit 31, and
 * the previous form (1 << 31) left-shifted into the sign bit of a signed
 * int, which is undefined behavior in C.  All flag values now have
 * unsigned type, which also avoids surprises in signed/unsigned
 * comparisons against uint32_t status values.
 *
 * @{
 */

#define ARM_PSR_N (1U << 31)
#define ARM_PSR_Z (1U << 30)
#define ARM_PSR_C (1U << 29)
#define ARM_PSR_V (1U << 28)
#define ARM_PSR_Q (1U << 27)
#define ARM_PSR_J (1U << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xfU << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1U << 9)
#define ARM_PSR_A (1U << 8)
#define ARM_PSR_I (1U << 7)
#define ARM_PSR_F (1U << 6)
#define ARM_PSR_T (1U << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1fU << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

/** @} */
93
94/** @} */
95
96#endif /* defined(ARM_MULTILIB_ARCH_V4) */
97
/**
 * @addtogroup ScoreCPU
 *
 * @{
 */

/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

/* Byte order follows the compiler multilib (__ARMEL__/__ARMEB__) */
#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif

#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/* No separate interrupt stack is managed by the score for this port */
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

/* ARM_HAS_FPU is provided by <rtems/score/arm.h> */
#if ( ARM_HAS_FPU == 1 )
  #define CPU_HARDWARE_FP TRUE
#else
  #define CPU_HARDWARE_FP FALSE
#endif

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

/* ARMv7-M supplies its own _CPU_Thread_Idle_body (declared below) */
#if defined(ARM_MULTILIB_ARCH_V7M)
  #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
#else
  #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
#endif

#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32? */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ). If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x80

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8

#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* Minimum task stack size: 4 KiB */
#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there's a CLZ function which could be
 *     used to implement much quicker than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

/** @} */
221
222#ifndef ASM
223
224#ifdef __cplusplus
225extern "C" {
226#endif
227
228/**
229 * @addtogroup ScoreCPU
230 *
231 * @{
232 */
233
/**
 * @brief Per-thread register context saved across a context switch.
 *
 * The stored set matches the AAPCS callee-saved registers plus stack
 * pointer and link register; the layout must stay in sync with the
 * assembly context-switch code — do not reorder fields.
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;  /* saved program status (includes mode and I/F bits) */
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;    /* frame pointer (r11) */
  uint32_t register_sp;    /* stack pointer */
  uint32_t register_lr;    /* return address */
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level; /* interrupt nesting depth for this context */
#else
  void *register_sp;       /* minimal fallback context: stack pointer only */
#endif
} Context_Control;
263
/**
 * @brief Floating point context.
 *
 * Hardware floating point context save/restore is not supported by this
 * port, so the structure is intentionally empty.
 */
typedef struct {
  /* Not supported */
} Context_Control_fp;

/* Shared null FP context (see _CPU_Context_Initialize_fp) */
SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context;

/* NOTE(review): presumably the PSR mode used for new thread contexts —
 * defined and consumed outside this header; confirm in the port's cpu.c. */
extern uint32_t arm_cpu_mode;
271
/**
 * @brief Disables normal interrupts (IRQ) and returns the previous level.
 *
 * ARMv4: sets the I bit (0x80) in the CPSR and returns the complete
 * previous CPSR.  ARMv7-M: raises BASEPRI to 0x80 via BASEPRI_MAX (which
 * only ever increases the masking priority) and returns the previous
 * BASEPRI.  Pass the return value to arm_interrupt_enable() to restore
 * the previous state.
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  /* No interrupt masking available on this multilib variant */
  level = 0;
#endif

  return level;
}
302
/**
 * @brief Restores the interrupt level previously returned by
 * arm_interrupt_disable().
 *
 * ARMv4: writes @a level back to the CPSR.  ARMv7-M: writes @a level
 * back to BASEPRI.
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
323
/**
 * @brief Briefly opens an interrupt window.
 *
 * Temporarily restores the interrupt state given by @a level (as
 * returned by arm_interrupt_disable()), then re-establishes the current
 * (disabled) state, allowing pending interrupts a chance to be serviced
 * inside a long critical section.
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
350
/* Score ISR primitives, mapped onto the inline helpers above.  The
 * cookie stores the pre-disable interrupt level. */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

/* Level set/get entry points — implemented outside this header */
void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

/**
 * @brief Initializes @a the_context so the thread starts at
 * @a entry_point on the stack area [stack_area_begin, +stack_area_size)
 * with interrupt level @a new_level.  @a is_fp is accepted for the
 * generic score interface (this port has no FP context).
 */
void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp
);

/* Reads the saved stack pointer out of a context */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

/* Restarts the calling thread by restoring its own initial context */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Copies the null FP context into *(*_destination) (empty struct here) */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)
388
/**
 * @brief Halts the system after a fatal error.
 *
 * Disables interrupts, moves the error code into r0 so it is visible to
 * a debugger, and spins forever.
 *
 * The do/while(0) wrapper deliberately has no trailing semicolon: the
 * previous definition ended with one, which made the macro expand to two
 * statements and broke usage such as
 * `if (cond) _CPU_Fatal_halt(e); else ...`.
 */
#define _CPU_Fatal_halt( _err )             \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0)
400
/**
 *  @brief CPU Initialize
 *
 *  Performs port-specific CPU initialization; called once during
 *  system start.
 */
void _CPU_Initialize( void );

/* Installs @a new_handler for @a vector; the previous handler is
 * returned through @a old_handler. */
void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/**
 *  @brief CPU Context Switch
 *
 *  Saves the register context of @a run and restores the context of
 *  @a heir.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

/* Restores @a new_context without saving the current one; never returns */
void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/* ARMv7-M provides its own multitasking start/stop entry points */
#if defined(ARM_MULTILIB_ARCH_V7M)
  void _ARMV7M_Start_multitasking( Context_Control *bsp, Context_Control *heir );
  void _ARMV7M_Stop_multitasking( Context_Control *bsp )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
  #define _CPU_Stop_multitasking _ARMV7M_Stop_multitasking
#endif

/* FP context hooks.  Context_Control_fp is empty in this port, so these
 * presumably do nothing — confirm in the port implementation. */
void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr );

void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr );
433
/**
 * @brief Reverses the byte order of a 32-bit value (endian swap).
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 has a single-instruction byte reverse */
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  /* Thumb-1: plain C, since the ARM-mode asm sequence below is unusable */
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  /* Classic four-instruction ARM byte-reverse sequence */
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
464
/**
 * @brief Reverses the byte order of a 16-bit value (endian swap).
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 has a dedicated halfword byte-reverse instruction */
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  /* Portable fallback: exchange the two bytes */
  uint16_t low = (uint16_t) (value & 0xffU);
  uint16_t high = (uint16_t) ((value >> 8) & 0xffU);

  return (uint16_t) ((low << 8) | high);
#endif
}
478
/* Declared only when this port supplies its own idle loop (ARMv7-M,
 * per CPU_PROVIDES_IDLE_THREAD_BODY above) */
#if CPU_PROVIDES_IDLE_THREAD_BODY == TRUE
  void *_CPU_Thread_Idle_body( uintptr_t ignored );
#endif
482
483/** @} */
484
485#if defined(ARM_MULTILIB_ARCH_V4)
486
487/**
488 * @addtogroup ScoreCPUARM
489 *
490 * @{
491 */
492
/**
 * @brief Complete ARM register set as presented to abort handlers
 * (see arm_exc_abort_handler below).
 *
 * Field order mirrors r0..r12, sp, lr, pc, cpsr; it presumably matches
 * the layout built by the exception entry assembly — do not reorder.
 */
typedef struct {
  uint32_t r0;
  uint32_t r1;
  uint32_t r2;
  uint32_t r3;
  uint32_t r4;
  uint32_t r5;
  uint32_t r6;
  uint32_t r7;
  uint32_t r8;
  uint32_t r9;
  uint32_t r10;
  uint32_t r11;
  uint32_t r12;
  uint32_t sp;
  uint32_t lr;
  uint32_t pc;
  uint32_t cpsr;
} arm_cpu_context;
512
/* Hook signature for data/prefetch abort handlers
 * (see arm_exc_data_abort_set_handler() and
 * arm_exc_prefetch_abort_set_handler() below) */
typedef void arm_exc_abort_handler( arm_cpu_context *context );

/* Exception indices in standard ARM vector table order */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8
} Arm_symbolic_exception_name;
526
/**
 * @brief Enables IRQs and returns the previous CPSR.
 *
 * Clears the I bit (0x80) in the CPSR.  The memory barrier keeps the
 * compiler from moving memory accesses across the enable point.  Pass
 * the returned PSR to arm_status_restore() to undo.
 */
static inline uint32_t arm_status_irq_enable( void )
{
  uint32_t arm_switch_reg;
  uint32_t psr;

  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[psr], cpsr\n"
    "bic %[arm_switch_reg], %[psr], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [psr] "=&r" (psr)
  );

  return psr;
}
545
/**
 * @brief Writes @a psr (as returned by arm_status_irq_enable()) back to
 * the CPSR.
 *
 * The trailing memory barrier keeps the compiler from moving memory
 * accesses across the restore point.
 */
static inline void arm_status_restore( uint32_t psr )
{
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[psr]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [psr] "r" (psr)
  );

  RTEMS_COMPILER_MEMORY_BARRIER();
}
560
/* Installs @a handler to be called on data aborts */
void arm_exc_data_abort_set_handler( arm_exc_abort_handler handler );

/* Data abort entry point — presumably installed in the vector table by
 * the port; confirm in the exception support code */
void arm_exc_data_abort( void );

/* Installs @a handler to be called on prefetch aborts */
void arm_exc_prefetch_abort_set_handler( arm_exc_abort_handler handler );

/* Prefetch abort entry point */
void arm_exc_prefetch_abort( void );

/* BSP-provided interrupt dispatcher invoked from the IRQ entry */
void bsp_interrupt_dispatch( void );

/* IRQ exception entry point */
void arm_exc_interrupt( void );
572
573/** @} */
574
/* XXX This is out of date */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_ip;  /* intra-procedure scratch register (r12) */
  uint32_t register_lr;
} CPU_Exception_frame;

typedef CPU_Exception_frame CPU_Interrupt_frame;

#else /* !defined(ARM_MULTILIB_ARCH_V4) */

/* Non-ARMv4 variants have no usable frame type yet */
typedef void CPU_Interrupt_frame;

/* FIXME */
typedef CPU_Interrupt_frame CPU_Exception_frame;

#endif /* !defined(ARM_MULTILIB_ARCH_V4) */

/* Prints @a frame during fatal error reporting */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
597
598#ifdef __cplusplus
599}
600#endif
601
602#endif /* ASM */
603
604#endif /* _RTEMS_SCORE_CPU_H */
Note: See TracBrowser for help on using the repository browser.