source: rtems/cpukit/score/cpu/arm/include/rtems/score/cpu.h @ 4adaed73

Last change on this file was 4adaed73, checked in by Sebastian Huber <sebastian.huber@…> on 07/27/21 at 09:08:54

score: Remove processor event broadcast/receive

Remove _CPU_SMP_Processor_event_broadcast() and
_CPU_SMP_Processor_event_receive(). These functions are hard to use since they
are subject to the lost wake up problem.

/**
 * @file
 *
 * @brief ARM Architecture Support API
 */

/*
 *  This include file contains information pertaining to the ARM
 *  processor.
 *
 *  Copyright (c) 2009, 2017 embedded brains GmbH
 *
 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 *  Copyright (c) 2006 OAR Corporation
 *
 *  Copyright (c) 2002 Advent Networks, Inc.
 *        Jay Monkman <jmonkman@adventnetworks.com>
 *
 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/basedefs.h>
#if defined(RTEMS_PARAVIRT)
#include <rtems/score/paravirt.h>
#endif
#include <rtems/score/arm.h>

/**
 * @addtogroup RTEMSScoreCPUARM
 *
 * @{
 */

#if defined(ARM_MULTILIB_ARCH_V4)

#if defined(__thumb__) && !defined(__thumb2__)
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
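
/*
 * Usage sketch (illustrative only, not part of the original header): in
 * Thumb-1 builds the inline assembly must run in ARM state, so a statement
 * wraps its instructions with ARM_SWITCH_TO_ARM/ARM_SWITCH_BACK and reserves
 * the scratch register via ARM_SWITCH_REGISTERS, e.g.
 *
 *   static inline uint32_t arm_read_cpsr( void )
 *   {
 *     uint32_t cpsr;
 *     ARM_SWITCH_REGISTERS;
 *
 *     __asm__ volatile (
 *       ARM_SWITCH_TO_ARM
 *       "mrs %[cpsr], cpsr\n"
 *       ARM_SWITCH_BACK
 *       : [cpsr] "=&r" (cpsr) ARM_SWITCH_ADDITIONAL_OUTPUT
 *     );
 *
 *     return cpsr;
 *   }
 *
 * The helper arm_read_cpsr() is hypothetical; the real users of these macros
 * are the interrupt support routines further down in this file.
 */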

/**
 * @name Program Status Register
 */
/**@{**/

#define ARM_PSR_N (1 << 31)
#define ARM_PSR_Z (1 << 30)
#define ARM_PSR_C (1 << 29)
#define ARM_PSR_V (1 << 28)
#define ARM_PSR_Q (1 << 27)
#define ARM_PSR_J (1 << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1 << 9)
#define ARM_PSR_A (1 << 8)
#define ARM_PSR_I (1 << 7)
#define ARM_PSR_F (1 << 6)
#define ARM_PSR_T (1 << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_HYP 0x1a
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

/** @} */
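
/*
 * Decoding sketch (illustrative only): the mode and interrupt mask bits of a
 * CPSR value read via "mrs" can be extracted with the definitions above, e.g.
 *
 *   uint32_t cpsr = arm_read_cpsr();   // hypothetical helper, see above
 *   uint32_t mode = (cpsr & ARM_PSR_M_MASK) >> ARM_PSR_M_SHIFT;
 *   bool irq_masked = (cpsr & ARM_PSR_I) != 0;
 *
 *   if ( mode == ARM_PSR_M_SVC && !irq_masked ) {
 *     // executing in Supervisor mode with IRQs enabled
 *   }
 */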

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

/*
 *  The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE

#define CPU_STACK_GROWS_UP FALSE

#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there is a CLZ instruction which could be
 *     used to implement this much more quickly than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
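
/*
 * Sketch only (not used by the port, which selects the generic bitfield code
 * above): on ARMv5 and later the CLZ instruction, exposed by GCC as
 * __builtin_clz(), could locate the most significant set bit of a non-zero
 * word in a few cycles, e.g.
 *
 *   static inline unsigned int arm_first_set_bit( uint32_t value )
 *   {
 *     // Returns 0 for the most significant bit and 31 for the least
 *     // significant bit; the caller must ensure value != 0.
 *     return (unsigned int) __builtin_clz( value );
 *   }
 *
 * The name arm_first_set_bit() is hypothetical and for illustration only.
 */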

#define CPU_USE_LIBC_INIT_FINI_ARRAY TRUE

#define CPU_MAXIMUM_PROCESSORS 32

#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
#endif

#ifdef ARM_MULTILIB_VFP
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef ARM_MULTILIB_ARCH_V4
  #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 40
#endif

#ifdef RTEMS_SMP
  #if defined(ARM_MULTILIB_VFP)
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #elif defined(ARM_MULTILIB_HAS_THREAD_ID_REGISTER)
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #else
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 44
  #endif
#endif

#define ARM_EXCEPTION_FRAME_SIZE 80

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

#define ARM_VFP_CONTEXT_SIZE 264

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
  uint32_t isr_dispatch_disable;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
} Context_Control;
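
/*
 * Note: the ARM_CONTEXT_CONTROL_*_OFFSET constants above are the byte offsets
 * of the corresponding Context_Control members and are consumed by the
 * context switch assembly code.  A consistency check along these lines keeps
 * them in sync with the structure layout (a sketch only; the port's actual
 * checks live elsewhere):
 *
 *   #include <stddef.h>
 *
 *   #ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
 *     RTEMS_STATIC_ASSERT(
 *       offsetof( Context_Control, thread_id )
 *         == ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET,
 *       ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET
 *     );
 *   #endif
 */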

static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
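
/*
 * Usage sketch (illustrative only; "shared" and "value" are hypothetical): a
 * producer that publishes data for another processor or a DMA engine writes
 * the payload, issues a data memory barrier, and only then sets the ready
 * flag, so an observer never sees the flag before the data:
 *
 *   shared->payload = value;
 *   _ARM_Data_memory_barrier();
 *   shared->ready = true;
 *
 * _ARM_Data_synchronization_barrier() additionally waits for completion of
 * outstanding memory accesses, and _ARM_Instruction_synchronization_barrier()
 * flushes the pipeline, e.g. after changing code or system registers.
 */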

#if defined(ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE)
uint32_t arm_interrupt_disable( void );
void arm_interrupt_enable( uint32_t level );
void arm_interrupt_flash( uint32_t level );
#else
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  /*
   * Disable only normal interrupts (IRQ).
   *
   * In order to support fast interrupts (FIQ) such that they can do something
   * useful, we have to disable the operating system support for FIQs.  Having
   * operating system support for them would require that FIQs are disabled
   * during critical sections of the operating system and application.  At this
   * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
   * the non critical sections of IRQs, so here they would have a small
   * advantage.  Without operating system support, the FIQs can execute at any
   * time (of course not during the service of another FIQ). If someone needs
   * operating system support for a FIQ, she can trigger a software interrupt and
   * service the request in a two-step process.
   */
#if __ARM_ARCH >= 7
  __asm__ volatile (
    "mrs %0, cpsr\n"
    "cpsid i\n"
    "isb"
    : "=&r" (level)
  );
#else
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#endif
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#endif

  return level;
}

static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}

static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
#endif  /* !ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE */

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  return ( level & 0x80 ) == 0;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  return level == 0;
#endif
}
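
/*
 * Usage sketch (illustrative only): the port-level critical section pattern
 * saves the previous interrupt state in a cookie, performs the critical work,
 * and restores the saved state; _CPU_ISR_Flash() briefly restores the saved
 * state inside a long critical section so pending interrupts can be serviced:
 *
 *   uint32_t level;
 *
 *   _CPU_ISR_Disable( level );
 *   // ... access state that must not be interrupted ...
 *   _CPU_ISR_Flash( level );
 *   // ... more critical work ...
 *   _CPU_ISR_Enable( level );
 *
 * Application code should prefer the rtems_interrupt_disable()/enable()
 * directives over these score-level macros.
 */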

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

#define _CPU_Fatal_halt( _source, _err )    \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     (void) _level;                         \
     __asm__ volatile ("mov r0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0);

/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

typedef void ( *CPU_ISR_handler )( void );

void _CPU_ISR_install_vector(
  uint32_t         vector,
  CPU_ISR_handler  new_handler,
  CPU_ISR_handler *old_handler
);

/**
 * @brief CPU switch context.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
  Context_Control *executing,
  Context_Control *heir
);

RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );

#if defined(ARM_MULTILIB_ARCH_V7M)
  RTEMS_NO_RETURN void _ARMV7M_Start_multitasking( Context_Control *heir );
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }
#endif
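
/*
 * Usage sketch (illustrative only; "flag" is a hypothetical volatile shared
 * variable): in SMP configurations a processor waiting for another processor
 * to change a flag can sleep in WFE instead of spinning at full speed, while
 * the writer wakes all waiters with SEV once the update is visible:
 *
 *   // waiter: WFE may return spuriously, so re-check the condition
 *   while ( !flag ) {
 *     _ARM_Wait_for_event();
 *   }
 *
 *   // signaller
 *   flag = true;
 *   _ARM_Data_synchronization_barrier();
 *   _ARM_Send_event();
 *
 * The processor event broadcast/receive wrappers formerly built on these
 * instructions were removed (see the change note above) because the pattern
 * is prone to lost wake-ups when the flag check and the wait are not ordered
 * carefully.
 */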


static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}
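
/*
 * Usage sketch (illustrative only): these helpers reverse the byte order of a
 * value, which is how a little-endian ARM target converts 32-bit and 16-bit
 * quantities to and from big-endian (network) byte order:
 *
 *   uint32_t host_value = 0x12345678;
 *   uint32_t big_endian_value = CPU_swap_u32( host_value );  // 0x78563412
 *   uint16_t big_endian_port  = CPU_swap_u16( 7777 );
 *
 * Swapping twice yields the original value, so the same routines serve both
 * directions of the conversion.
 */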

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
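
/*
 * Usage sketch (illustrative only): elapsed time can be measured by taking
 * two counter samples and converting the tick difference with the counter
 * frequency; the unsigned subtraction in _CPU_Counter_difference() handles a
 * single counter wrap-around correctly:
 *
 *   CPU_Counter_ticks begin = _CPU_Counter_read();
 *   // ... work to be measured ...
 *   CPU_Counter_ticks end = _CPU_Counter_read();
 *
 *   CPU_Counter_ticks delta = _CPU_Counter_difference( end, begin );
 *   uint64_t nanoseconds =
 *     ( (uint64_t) delta * 1000000000 ) / _CPU_Counter_frequency();
 */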

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#if defined(ARM_MULTILIB_ARCH_V4)

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;

typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  const ARM_VFP_context *vfp_context;
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

void _ARM_Exception_default( CPU_Exception_frame *frame );

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#ifdef __cplusplus
}
#endif

#endif /* ASM */

/** @} */

#endif /* _RTEMS_SCORE_CPU_H */