source: rtems/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h @ 5f652cb2

Last change on this file was 5f652cb2, checked in by Kinsey Moore <kinsey.moore@…> on 07/26/21 at 20:43:00

cpukit: Add AArch64 SMP Support

This adds SMP support for AArch64 in cpukit and for the ZynqMP BSPs.

/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPU
 *
 * @brief AArch64 Architecture Support API
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/basedefs.h>
#if defined(RTEMS_PARAVIRT)
#include <rtems/score/paravirt.h>
#endif
#include <rtems/score/aarch64.h>
#include <libcpu/vectors.h>

/**
 * @addtogroup RTEMSScoreCPUAArch64
 *
 * @{
 */

/**
 * @name Program State Registers
 */
/**@{**/

#define AARCH64_PSTATE_N (1LL << 31)
#define AARCH64_PSTATE_Z (1LL << 30)
#define AARCH64_PSTATE_C (1LL << 29)
#define AARCH64_PSTATE_V (1LL << 28)
#define AARCH64_PSTATE_D (1LL << 9)
#define AARCH64_PSTATE_A (1LL << 8)
#define AARCH64_PSTATE_I (1LL << 7)
#define AARCH64_PSTATE_F (1LL << 6)

/** @} */
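
/*
 * Example (editor's sketch, not part of the original header): these masks
 * select individual bits of a saved PSTATE/DAIF value.  For instance, IRQs
 * are masked exactly when the I bit is set:
 *
 *   uint64_t daif;
 *   __asm__ volatile ( "mrs %0, DAIF" : "=r" ( daif ) );
 *   bool irqs_masked = ( daif & AARCH64_PSTATE_I ) != 0;
 */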

/*
 *  AArch64 uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE

#define CPU_STACK_GROWS_UP FALSE

#if defined(AARCH64_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
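
/*
 * Example (editor's sketch): CPU_STRUCTURE_ALIGNMENT pads a type out to a
 * cache-line boundary, e.g. to avoid false sharing between processors; the
 * type name below is hypothetical:
 *
 *   typedef struct {
 *     uint64_t counter;
 *   } CPU_STRUCTURE_ALIGNMENT Per_CPU_stats;
 */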

#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 10)

/* This could be either 4 or 8, depending on the ABI in use.
 * Could also use __LP64__ or __ILP32__ */
/* AAPCS64, section 5.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER __SIZEOF_POINTER__

/* AAPCS64, section 5.1, Fundamental Data Types */
#define CPU_ALIGNMENT 16

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS64, section 6.2.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 16

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_LIBC_INIT_FINI_ARRAY TRUE

#define CPU_MAXIMUM_PROCESSORS 32

#define AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET 0x70

#ifdef AARCH64_MULTILIB_VFP
  #define AARCH64_CONTEXT_CONTROL_D8_OFFSET 0x78
#endif

#define AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 0x68

#ifdef RTEMS_SMP
  #if defined(AARCH64_MULTILIB_VFP)
    #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0xb8
  #else
    #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x78
  #endif
#endif

#define AARCH64_EXCEPTION_FRAME_SIZE 0x350

#define AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET 0xF8
#define AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET 0xF0
#define AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET 0x108
#define AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET 0x118
#define AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET 0x128
#define AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET 0x138
#define AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET 0x150

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

typedef unsigned __int128 uint128_t;

typedef struct {
  uint64_t register_x19;
  uint64_t register_x20;
  uint64_t register_x21;
  uint64_t register_x22;
  uint64_t register_x23;
  uint64_t register_x24;
  uint64_t register_x25;
  uint64_t register_x26;
  uint64_t register_x27;
  uint64_t register_x28;
  uint64_t register_fp;
  uint64_t register_lr;
  uint64_t register_sp;
  uint64_t isr_dispatch_disable;
  uint64_t thread_id;
#ifdef AARCH64_MULTILIB_VFP
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
} Context_Control;
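
/*
 * Example (editor's sketch): the AARCH64_CONTEXT_CONTROL_*_OFFSET constants
 * above are consumed by the context-switch assembly and must track the
 * layout of Context_Control.  A compile-time check along these lines keeps
 * them in sync (offsetof() comes from <stddef.h>):
 *
 *   RTEMS_STATIC_ASSERT(
 *     offsetof( Context_Control, thread_id )
 *       == AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET,
 *     Context_Control_thread_id
 *   );
 */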

static inline void _AARCH64_Data_memory_barrier( void )
{
  __asm__ volatile ( "dmb SY" : : : "memory" );
}

static inline void _AARCH64_Data_synchronization_barrier( void )
{
  __asm__ volatile ( "dsb SY" : : : "memory" );
}

static inline void _AARCH64_Instruction_synchronization_barrier( void )
{
  __asm__ volatile ( "isb" : : : "memory" );
}
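
/*
 * Example (editor's sketch): a producer can publish data to another
 * processor by ordering the data write before the flag write; the variables
 * below are hypothetical:
 *
 *   shared_data = value;
 *   _AARCH64_Data_memory_barrier();
 *   shared_ready = true;
 */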

void _CPU_ISR_Set_level( uint64_t level );

uint64_t _CPU_ISR_Get_level( void );

#if defined(AARCH64_DISABLE_INLINE_ISR_DISABLE_ENABLE)
uint64_t AArch64_interrupt_disable( void );
void AArch64_interrupt_enable( uint64_t isr_cookie );
void AArch64_interrupt_flash( uint64_t isr_cookie );
#else
static inline uint64_t AArch64_interrupt_disable( void )
{
  uint64_t isr_cookie;

  __asm__ volatile (
    "mrs %[isr_cookie], DAIF\n"
    "msr DAIFSet, #0x2\n"
    : [isr_cookie] "=&r" (isr_cookie)
  );

  return isr_cookie;
}

static inline void AArch64_interrupt_enable( uint64_t isr_cookie )
{
  __asm__ volatile (
    "msr DAIF, %[isr_cookie]\n"
    : : [isr_cookie] "r" (isr_cookie)
  );
}

static inline void AArch64_interrupt_flash( uint64_t isr_cookie )
{
  AArch64_interrupt_enable(isr_cookie);
  AArch64_interrupt_disable();
}
#endif  /* !AARCH64_DISABLE_INLINE_ISR_DISABLE_ENABLE */

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = AArch64_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  AArch64_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  AArch64_interrupt_flash( _isr_cookie )

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint64_t isr_cookie )
{
  return ( isr_cookie & AARCH64_PSTATE_I ) == 0;
}
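
/*
 * Example (editor's sketch): _CPU_ISR_Disable()/_CPU_ISR_Enable() bracket a
 * critical section; the cookie restores the exact prior DAIF state, so
 * nested sections work.  update_shared_state() is a hypothetical stand-in:
 *
 *   uint64_t level;
 *
 *   _CPU_ISR_Disable( level );
 *   update_shared_state();
 *   _CPU_ISR_Enable( level );
 */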

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint64_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

typedef void ( *CPU_ISR_handler )( void );

void _CPU_ISR_install_vector(
  uint32_t         vector,
  CPU_ISR_handler  new_handler,
  CPU_ISR_handler *old_handler
);

/**
 * @brief CPU switch context.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
  Context_Control *executing,
  Context_Control *heir
);

RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint64_t mpidr; /* MPIDR_EL1 is a 64-bit system register; MRS needs Xt */

    __asm__ volatile (
      "mrs %[mpidr], mpidr_el1\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _AARCH64_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  static inline void _AARCH64_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }
#endif
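
/*
 * Example (editor's sketch): _AARCH64_Wait_for_event() and
 * _AARCH64_Send_event() pair up for low-power polling.  WFE may wake
 * spuriously, so the waiter re-checks its condition in a loop; "flag" is a
 * hypothetical shared variable set by another processor before it executes
 * SEV:
 *
 *   while ( !flag ) {
 *     _AARCH64_Wait_for_event();
 *   }
 */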

static inline uint32_t CPU_swap_u32( uint32_t value )
{
  /* The A64 REV instruction reverses the byte order of a 32-bit register */
  __asm__ volatile ( "rev %w[value], %w[value]" : [value] "+r" (value) );
  return value;
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
}
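
/*
 * Example (editor's sketch): the swap helpers convert between little- and
 * big-endian representations, e.g. for network byte order:
 *
 *   uint32_t swapped = CPU_swap_u32( 0x12345678 );   // yields 0x78563412
 *   uint16_t swapped16 = CPU_swap_u16( 0x1234 );     // yields 0x3412
 */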

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
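
/*
 * Example (editor's sketch): because the subtraction is performed in
 * unsigned 32-bit arithmetic, the result is correct even if the counter
 * wrapped once between the reads; do_work() is a hypothetical workload:
 *
 *   CPU_Counter_ticks t0 = _CPU_Counter_read();
 *   do_work();
 *   CPU_Counter_ticks dt = _CPU_Counter_difference( _CPU_Counter_read(), t0 );
 */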

void *_CPU_Thread_Idle_body( uintptr_t ignored );

typedef enum {
  AARCH64_EXCEPTION_SP0_SYNCHRONOUS = 0,
  AARCH64_EXCEPTION_SP0_IRQ = 1,
  AARCH64_EXCEPTION_SP0_FIQ = 2,
  AARCH64_EXCEPTION_SP0_SERROR = 3,
  AARCH64_EXCEPTION_SPx_SYNCHRONOUS = 4,
  AARCH64_EXCEPTION_SPx_IRQ = 5,
  AARCH64_EXCEPTION_SPx_FIQ = 6,
  AARCH64_EXCEPTION_SPx_SERROR = 7,
  AARCH64_EXCEPTION_LEL64_SYNCHRONOUS = 8,
  AARCH64_EXCEPTION_LEL64_IRQ = 9,
  AARCH64_EXCEPTION_LEL64_FIQ = 10,
  AARCH64_EXCEPTION_LEL64_SERROR = 11,
  AARCH64_EXCEPTION_LEL32_SYNCHRONOUS = 12,
  AARCH64_EXCEPTION_LEL32_IRQ = 13,
  AARCH64_EXCEPTION_LEL32_FIQ = 14,
  AARCH64_EXCEPTION_LEL32_SERROR = 15,
  MAX_EXCEPTIONS = 16,
  AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = 0xffffffffffffffff
} AArch64_symbolic_exception_name;

#define VECTOR_POINTER_OFFSET 0x78
#define VECTOR_ENTRY_SIZE 0x80
void _AArch64_Exception_interrupt_no_nest( void );
void _AArch64_Exception_interrupt_nest( void );
static inline void* AArch64_set_exception_handler(
  AArch64_symbolic_exception_name exception,
  void (*handler)(void)
)
{
  /* get current table address */
  char *vbar = (char*)AArch64_get_vector_base_address();

  /* calculate address of vector to be replaced */
  char *cvector_address = vbar + VECTOR_ENTRY_SIZE * exception
    + VECTOR_POINTER_OFFSET;

  /* get current vector pointer */
  void (**vector_address)(void) = (void(**)(void))cvector_address;
  void (*current_vector_pointer)(void);
  current_vector_pointer = *vector_address;

  /* replace vector pointer */
  *vector_address = handler;

  /* return now-previous vector pointer */
  return (void*)current_vector_pointer;
}
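
/*
 * Example (editor's sketch): each 0x80-byte vector entry reserves a slot at
 * VECTOR_POINTER_OFFSET that holds the handler pointer, so a handler can be
 * swapped in and the previous one restored later; my_irq_handler() is
 * hypothetical:
 *
 *   void *previous = AArch64_set_exception_handler(
 *     AARCH64_EXCEPTION_SPx_IRQ,
 *     my_irq_handler
 *   );
 *   ...
 *   AArch64_set_exception_handler(
 *     AARCH64_EXCEPTION_SPx_IRQ,
 *     (void (*)(void)) previous
 *   );
 */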

typedef struct {
  uint64_t register_x0;
  uint64_t register_x1;
  uint64_t register_x2;
  uint64_t register_x3;
  uint64_t register_x4;
  uint64_t register_x5;
  uint64_t register_x6;
  uint64_t register_x7;
  uint64_t register_x8;
  uint64_t register_x9;
  uint64_t register_x10;
  uint64_t register_x11;
  uint64_t register_x12;
  uint64_t register_x13;
  uint64_t register_x14;
  uint64_t register_x15;
  uint64_t register_x16;
  uint64_t register_x17;
  uint64_t register_x18;
  uint64_t register_x19;
  uint64_t register_x20;
  uint64_t register_x21;
  uint64_t register_x22;
  uint64_t register_x23;
  uint64_t register_x24;
  uint64_t register_x25;
  uint64_t register_x26;
  uint64_t register_x27;
  uint64_t register_x28;
  uint64_t register_fp;
  void *register_lr;
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  uint32_t _register_lr_top;
#endif
  uintptr_t register_sp;
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  uint32_t _register_sp_top;
#endif
  void *register_pc;
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  uint32_t _register_pc_top;
#endif
  uint64_t register_daif;
  uint64_t register_cpsr;
  uint64_t register_syndrome;
  uint64_t register_fault_address;
  AArch64_symbolic_exception_name vector;
  uint64_t reserved_for_stack_alignment;
  uint64_t register_fpsr;
  uint64_t register_fpcr;
  uint128_t register_q0;
  uint128_t register_q1;
  uint128_t register_q2;
  uint128_t register_q3;
  uint128_t register_q4;
  uint128_t register_q5;
  uint128_t register_q6;
  uint128_t register_q7;
  uint128_t register_q8;
  uint128_t register_q9;
  uint128_t register_q10;
  uint128_t register_q11;
  uint128_t register_q12;
  uint128_t register_q13;
  uint128_t register_q14;
  uint128_t register_q15;
  uint128_t register_q16;
  uint128_t register_q17;
  uint128_t register_q18;
  uint128_t register_q19;
  uint128_t register_q20;
  uint128_t register_q21;
  uint128_t register_q22;
  uint128_t register_q23;
  uint128_t register_q24;
  uint128_t register_q25;
  uint128_t register_q26;
  uint128_t register_q27;
  uint128_t register_q28;
  uint128_t register_q29;
  uint128_t register_q30;
  uint128_t register_q31;
} CPU_Exception_frame;
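
/*
 * Example (editor's sketch): AARCH64_EXCEPTION_FRAME_SIZE and the
 * AARCH64_EXCEPTION_FRAME_REGISTER_*_OFFSET constants defined earlier
 * mirror this layout for the assembly exception handlers; a compile-time
 * check keeps them in sync:
 *
 *   RTEMS_STATIC_ASSERT(
 *     sizeof( CPU_Exception_frame ) == AARCH64_EXCEPTION_FRAME_SIZE,
 *     CPU_Exception_frame_size
 *   );
 */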

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

void _AArch64_Exception_default( CPU_Exception_frame *frame );

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;

#ifdef __cplusplus
}
#endif

#endif /* ASM */

/** @} */

#endif /* _RTEMS_SCORE_CPU_H */