source: rtems/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h @ d7a48e1

Last change on this file since d7a48e1 was d7a48e1, checked in by Sebastian Huber <sebastian.huber@…>, on 10/06/20 at 05:39:44

rtems: Improve RTEMS_NO_RETURN attribute

Provide RTEMS_NO_RETURN also in case RTEMS_DEBUG is defined to prevent errors
like this:

error: no return statement in function returning non-void [-Werror=return-type]

Use C11 and C++11 standard means to declare a no-return function.

Close #4122.

  • Property mode set to 100644
File size: 14.0 KB
Line 
1/* SPDX-License-Identifier: BSD-2-Clause */
2
3/**
4 * @file
5 *
6 * @ingroup RTEMSScoreCPU
7 *
8 * @brief AArch64 Architecture Support API
9 */
10
11/*
12 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
13 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 *    notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 *    notice, this list of conditions and the following disclaimer in the
22 *    documentation and/or other materials provided with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _RTEMS_SCORE_CPU_H
38#define _RTEMS_SCORE_CPU_H
39
40#include <rtems/score/basedefs.h>
41#if defined(RTEMS_PARAVIRT)
42#include <rtems/score/paravirt.h>
43#endif
44#include <rtems/score/aarch64.h>
45#include <libcpu/vectors.h>
46
47/**
48 * @addtogroup RTEMSScoreCPUAArch64
49 *
50 * @{
51 */
52
53/**
54 * @name Program State Registers
55 */
56/**@{**/
57
/*
 * Bit masks for a PSTATE/SPSR image: N, Z, C, V are the condition flags
 * (bits 31-28); D, A, I, F are the exception mask bits (bits 9-6).
 * AARCH64_PSTATE_I is the IRQ mask bit; see _CPU_ISR_Is_enabled() below.
 */
#define AARCH64_PSTATE_N (1LL << 31)
#define AARCH64_PSTATE_Z (1LL << 30)
#define AARCH64_PSTATE_C (1LL << 29)
#define AARCH64_PSTATE_V (1LL << 28)
#define AARCH64_PSTATE_D (1LL << 9)
#define AARCH64_PSTATE_A (1LL << 8)
#define AARCH64_PSTATE_I (1LL << 7)
#define AARCH64_PSTATE_F (1LL << 6)
66
67/** @} */
68
/*
 *  AArch64 uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/* ISR handlers do not receive a frame pointer argument. */
#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 * No separate floating point context management by the score: with
 * AARCH64_MULTILIB_VFP the callee-saved FP registers d8-d15 are part of
 * every thread context (see Context_Control below).
 */
#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE

/* The stack grows from high addresses to low addresses. */
#define CPU_STACK_GROWS_UP FALSE

/* Cache line size is selected by the multilib. */
#if defined(AARCH64_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

/* Only one interrupt level bit is exposed through the task mode mask. */
#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* 10 KiB minimum thread stack size. */
#define CPU_STACK_MINIMUM_SIZE (1024 * 10)
105
/* This could be either 4 or 8, depending on the ABI in use.
 * Could also use __LP64__ or __ILP32__ */
/* AAPCS64, section 5.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER __SIZEOF_POINTER__

/* AAPCS64, section 5.1, Fundamental Data Types */
#define CPU_ALIGNMENT 16

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS64, section 6.2.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 16

/* Align the interrupt stack to the cache line size. */
#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/* Use the generic implementations of the bit field operations. */
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

/* Use the libc .init_array/.fini_array mechanism for global construction. */
#define CPU_USE_LIBC_INIT_FINI_ARRAY TRUE

#define CPU_MAXIMUM_PROCESSORS 32
126
/*
 * Byte offsets of Context_Control members.  These are defined above the
 * ASM guard, so they are visible to assembly sources; keep them in sync
 * with the Context_Control layout below.
 *
 * NOTE(review): the AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET values
 * (112 with VFP, 48 without) do not appear to match the Context_Control
 * structure as defined in this file -- verify against the assembly code
 * before relying on them in SMP configurations.
 */
#define AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET 112

#ifdef AARCH64_MULTILIB_VFP
  #define AARCH64_CONTEXT_CONTROL_D8_OFFSET 120
#endif

#define AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 104

#ifdef RTEMS_SMP
  #if defined(AARCH64_MULTILIB_VFP)
    #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #else
    #define AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #endif
#endif

/*
 * Size and member byte offsets of CPU_Exception_frame; keep in sync with
 * the structure definition below and the exception handling assembly.
 */
#define AARCH64_EXCEPTION_FRAME_SIZE 848

#define AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET 248
#define AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET 240
#define AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET 264
#define AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET 280
#define AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET 296
#define AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET 312
#define AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET 336
152
153#ifndef ASM
154
155#ifdef __cplusplus
156extern "C" {
157#endif
158
/* GCC/Clang extension: 128-bit integer type used for the Q register images. */
typedef unsigned __int128 uint128_t;
160
/*
 * Thread context saved and restored by _CPU_Context_switch().  It holds
 * the AAPCS64 callee-saved integer registers x19-x28, the frame pointer,
 * link register and stack pointer, plus per-thread score state.
 *
 * NOTE(review): the member order must match the AARCH64_CONTEXT_CONTROL_*
 * byte offsets defined above -- keep in sync with the assembly code.
 */
typedef struct {
  uint64_t register_x19;
  uint64_t register_x20;
  uint64_t register_x21;
  uint64_t register_x22;
  uint64_t register_x23;
  uint64_t register_x24;
  uint64_t register_x25;
  uint64_t register_x26;
  uint64_t register_x27;
  uint64_t register_x28;
  uint64_t register_fp;
  uint64_t register_lr;
  uint64_t register_sp;
  /* Per-thread ISR dispatch disable indicator. */
  uint64_t isr_dispatch_disable;
  /* NOTE(review): presumably the TLS base pointer -- confirm against the
   * context initialization code. */
  uint64_t thread_id;
#ifdef AARCH64_MULTILIB_VFP
  /* AAPCS64 callee-saved FP/SIMD registers d8-d15. */
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  /* Set while a processor executes this context; read/written via the
   * accessors below. */
  volatile bool is_executing;
#endif
} Context_Control;
191
/**
 * @brief Data memory barrier.
 *
 * NOTE(review): "dmb LD" orders only loads; a function with this generic
 * name is normally a full barrier ("dmb SY").  Verify that load-only
 * ordering is intended, in particular for _CPU_SMP_Processor_event_receive()
 * below, which uses this as a general barrier.
 */
static inline void _AARCH64_Data_memory_barrier( void )
{
  __asm__ volatile ( "dmb LD" : : : "memory" );
}

/**
 * @brief Data synchronization barrier.
 *
 * NOTE(review): same concern as above -- "dsb LD" waits only for loads,
 * while "dsb SY" would wait for completion of all memory accesses.
 */
static inline void _AARCH64_Data_synchronization_barrier( void )
{
  __asm__ volatile ( "dsb LD" : : : "memory" );
}

/**
 * @brief Instruction synchronization barrier.
 */
static inline void _AARCH64_Instruction_synchronization_barrier( void )
{
  __asm__ volatile ( "isb" : : : "memory" );
}
206
/* Sets the exception mask bits (DAIF) according to the given level. */
void _CPU_ISR_Set_level( uint64_t level );

/* Returns the current exception mask bits as a DAIF image. */
uint64_t _CPU_ISR_Get_level( void );

#if defined(AARCH64_DISABLE_INLINE_ISR_DISABLE_ENABLE)
/* Out-of-line variants, used when inlining is disabled by the guard macro. */
uint64_t AArch64_interrupt_disable( void );
void AArch64_interrupt_enable( uint64_t level );
void AArch64_interrupt_flash( uint64_t level );
#else
/*
 * Disables IRQs and returns the previous interrupt level.
 * "msr DAIFSet, #0x2" sets the I (IRQ mask) bit of PSTATE.
 */
static inline uint64_t AArch64_interrupt_disable( void )
{
  uint64_t level = _CPU_ISR_Get_level();
  __asm__ volatile (
    "msr DAIFSet, #0x2\n"
  );
  return level;
}

/* Restores a previously saved interrupt level by writing DAIF directly. */
static inline void AArch64_interrupt_enable( uint64_t level )
{
  __asm__ volatile (
    "msr DAIF, %[level]\n"
    : : [level] "r" (level)
  );
}

/*
 * Briefly restores the given interrupt level and disables IRQs again, so
 * that pending interrupts can be serviced inside a long critical section.
 */
static inline void AArch64_interrupt_flash( uint64_t level )
{
  AArch64_interrupt_enable(level);
  AArch64_interrupt_disable();
}
#endif  /* !AARCH64_DISABLE_INLINE_ISR_DISABLE_ENABLE */
239
/* Saves the current interrupt level in _isr_cookie and disables IRQs. */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = AArch64_interrupt_disable(); \
  } while (0)

/* Restores the interrupt level saved by _CPU_ISR_Disable(). */
#define _CPU_ISR_Enable( _isr_cookie )  \
  AArch64_interrupt_enable( _isr_cookie )

/* Temporarily enables interrupts inside a disable/enable section. */
#define _CPU_ISR_Flash( _isr_cookie ) \
  AArch64_interrupt_flash( _isr_cookie )
250
251RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint64_t level )
252{
253  return ( level & AARCH64_PSTATE_I ) == 0;
254}
255
/**
 * @brief Initializes the thread context for first use.
 *
 * @param[out] the_context the context to initialize.
 * @param stack_area_begin begin address of the thread stack area.
 * @param stack_area_size size of the thread stack area in bytes.
 * @param new_level initial interrupt level of the thread.
 * @param entry_point entry point of the thread.
 * @param is_fp true if the thread uses the floating point unit.
 * @param tls_area begin address of the thread-local storage area.
 */
void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint64_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

/* Returns the stack pointer saved in the given context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp
268
#ifdef RTEMS_SMP
  /* Reads the volatile is_executing indicator of the context. */
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  /* Marks whether a processor is currently executing this context. */
  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif
285
/*
 * Restarts the calling thread by restoring its context.
 * NOTE(review): the expansion ends in a semicolon, so an invocation
 * followed by ";" yields an extra empty statement -- harmless, but unusual.
 */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/* Copies the null FP context into the destination FP context area. */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)
293
/*
 * Halts the system: disables interrupts, moves the error code into x0
 * (so it is visible to a debugger) and spins forever.
 * NOTE(review): the expansion ends with "while (0);" -- the trailing
 * semicolon makes this macro unsafe in unbraced if/else statements.
 */
#define _CPU_Fatal_halt( _source, _err )    \
   do {                                     \
     uint64_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     (void) _level;                         \
     __asm__ volatile ("mov x0, %0\n"       \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "x0" );                \
     while (1);                             \
   } while (0);
306
/**
 * @brief CPU initialization.
 */
void _CPU_Initialize( void );

/* Type of a raw interrupt vector handler. */
typedef void ( *CPU_ISR_handler )( void );

/*
 * Installs new_handler in the given vector slot and returns the previous
 * handler through old_handler.
 */
void _CPU_ISR_install_vector(
  uint32_t         vector,
  CPU_ISR_handler  new_handler,
  CPU_ISR_handler *old_handler
);

/**
 * @brief CPU switch context.
 *
 * Saves the context of the running thread and restores the context of the
 * heir thread.
 */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

/* Restores the given context; does not return. */
RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );
326
#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  /*
   * Returns the index of the current processor, taken from the low byte
   * (affinity level 0) of MPIDR_EL1.
   */
  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    __asm__ volatile (
      "mrs %[mpidr], mpidr_el1\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  /* Executes SEV to wake processors waiting in _AARCH64_Wait_for_event(). */
  static inline void _AARCH64_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  /* Executes WFE to wait until an event is signalled. */
  static inline void _AARCH64_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  /*
   * Issues a barrier so prior memory accesses complete, then signals an
   * event to wake waiting processors.
   */
  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _AARCH64_Data_synchronization_barrier();
    _AARCH64_Send_event();
  }

  /* Waits for an event, then orders subsequent memory accesses. */
  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _AARCH64_Wait_for_event();
    _AARCH64_Data_memory_barrier();
  }
#endif
372
373
/**
 * @brief Byte-swaps a 32-bit value (endianness conversion).
 *
 * @param value the value to byte-swap.
 *
 * @return the byte-swapped value.
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
  /*
   * The previous implementation used AArch32 inline assembly copied from
   * the ARM port: "BIC" with an immediate and "MOV ..., ROR #8" have no
   * AArch64 encodings, so it could not assemble for this architecture.
   * The compiler builtin emits the AArch64 REV instruction instead.
   */
  return __builtin_bswap32( value );
}
385
/**
 * @brief Byte-swaps a 16-bit value (endianness conversion).
 *
 * @param value the value to byte-swap.
 *
 * @return the byte-swapped value.
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
  uint16_t low_byte  = value & 0xffU;
  uint16_t high_byte = (value >> 8) & 0xffU;

  return (uint16_t) ((low_byte << 8) | high_byte);
}
390
/* Free-running counter ticks; wraps modulo 2^32. */
typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

/**
 * @brief Returns the number of ticks elapsed between two counter reads.
 *
 * Unsigned modular arithmetic keeps the result correct across a counter
 * wrap-around between the two reads.
 */
static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  CPU_Counter_ticks elapsed = second - first;

  return elapsed;
}

void *_CPU_Thread_Idle_body( uintptr_t ignored );
406
/*
 * Symbolic indices for the 16 entries of the AArch64 exception vector
 * table: four exception classes (synchronous, IRQ, FIQ, SError) for each
 * of the four origins (current EL with SP_EL0, current EL with SP_ELx,
 * lower EL using AArch64, lower EL using AArch32).
 */
typedef enum {
  AARCH64_EXCEPTION_SP0_SYNCHRONOUS = 0,
  AARCH64_EXCEPTION_SP0_IRQ = 1,
  AARCH64_EXCEPTION_SP0_FIQ = 2,
  AARCH64_EXCEPTION_SP0_SERROR = 3,
  AARCH64_EXCEPTION_SPx_SYNCHRONOUS = 4,
  AARCH64_EXCEPTION_SPx_IRQ = 5,
  AARCH64_EXCEPTION_SPx_FIQ = 6,
  AARCH64_EXCEPTION_SPx_SERROR = 7,
  AARCH64_EXCEPTION_LEL64_SYNCHRONOUS = 8,
  AARCH64_EXCEPTION_LEL64_IRQ = 9,
  AARCH64_EXCEPTION_LEL64_FIQ = 10,
  AARCH64_EXCEPTION_LEL64_SERROR = 11,
  AARCH64_EXCEPTION_LEL32_SYNCHRONOUS = 12,
  AARCH64_EXCEPTION_LEL32_IRQ = 13,
  AARCH64_EXCEPTION_LEL32_FIQ = 14,
  AARCH64_EXCEPTION_LEL32_SERROR = 15,
  MAX_EXCEPTIONS = 16,
  /* Forces a 64-bit underlying type for this enum. */
  AARCH64_EXCEPTION_MAKE_ENUM_64_BIT = 0xffffffffffffffff
} AArch64_symbolic_exception_name;
427
428#define VECTOR_POINTER_OFFSET 0x78
429#define VECTOR_ENTRY_SIZE 0x80
430void _AArch64_Exception_interrupt_no_nest( void );
431void _AArch64_Exception_interrupt_nest( void );
432static inline void* AArch64_set_exception_handler(
433  AArch64_symbolic_exception_name exception,
434  void (*handler)(void)
435)
436{
437  /* get current table address */
438  char *vbar = (char*)AArch64_get_vector_base_address();
439
440  /* calculate address of vector to be replaced */
441  char *cvector_address = vbar + VECTOR_ENTRY_SIZE * exception
442    + VECTOR_POINTER_OFFSET;
443
444  /* get current vector pointer */
445  void (**vector_address)(void) = (void(**)(void))cvector_address;
446  void (*current_vector_pointer)(void);
447  current_vector_pointer = *vector_address;
448
449  /* replace vector pointer */
450  *vector_address = handler;
451
452  /* return now-previous vector pointer */
453  return (void*)current_vector_pointer;
454}
455
/*
 * Register state saved by the exception handling code.
 * NOTE(review): the layout must match AARCH64_EXCEPTION_FRAME_SIZE and the
 * AARCH64_EXCEPTION_FRAME_REGISTER_* offsets defined above -- keep in sync
 * with the assembly code.
 */
typedef struct {
  uint64_t register_x0;
  uint64_t register_x1;
  uint64_t register_x2;
  uint64_t register_x3;
  uint64_t register_x4;
  uint64_t register_x5;
  uint64_t register_x6;
  uint64_t register_x7;
  uint64_t register_x8;
  uint64_t register_x9;
  uint64_t register_x10;
  uint64_t register_x11;
  uint64_t register_x12;
  uint64_t register_x13;
  uint64_t register_x14;
  uint64_t register_x15;
  uint64_t register_x16;
  uint64_t register_x17;
  uint64_t register_x18;
  uint64_t register_x19;
  uint64_t register_x20;
  uint64_t register_x21;
  uint64_t register_x22;
  uint64_t register_x23;
  uint64_t register_x24;
  uint64_t register_x25;
  uint64_t register_x26;
  uint64_t register_x27;
  uint64_t register_x28;
  uint64_t register_fp;
  /* On ILP32 the pointer is 32 bits; the _top member keeps the slot 64
   * bits wide so the offsets stay identical across ABIs. */
  void *register_lr;
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  uint32_t _register_lr_top;
#endif
  uint64_t register_sp;
  void *register_pc;
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  uint32_t _register_pc_top;
#endif
  uint64_t register_daif;
  uint64_t register_cpsr;
  /* NOTE(review): presumably the ESR_ELx value -- confirm against the
   * exception entry assembly. */
  uint64_t register_syndrome;
  /* NOTE(review): presumably the FAR_ELx value -- confirm as above. */
  uint64_t register_fault_address;
  AArch64_symbolic_exception_name vector;
  /* Padding so the frame keeps its stack alignment. */
  uint64_t reserved_for_stack_alignment;
  uint64_t register_fpsr;
  uint64_t register_fpcr;
  uint128_t register_q0;
  uint128_t register_q1;
  uint128_t register_q2;
  uint128_t register_q3;
  uint128_t register_q4;
  uint128_t register_q5;
  uint128_t register_q6;
  uint128_t register_q7;
  uint128_t register_q8;
  uint128_t register_q9;
  uint128_t register_q10;
  uint128_t register_q11;
  uint128_t register_q12;
  uint128_t register_q13;
  uint128_t register_q14;
  uint128_t register_q15;
  uint128_t register_q16;
  uint128_t register_q17;
  uint128_t register_q18;
  uint128_t register_q19;
  uint128_t register_q20;
  uint128_t register_q21;
  uint128_t register_q22;
  uint128_t register_q23;
  uint128_t register_q24;
  uint128_t register_q25;
  uint128_t register_q26;
  uint128_t register_q27;
  uint128_t register_q28;
  uint128_t register_q29;
  uint128_t register_q30;
  uint128_t register_q31;
} CPU_Exception_frame;
537
/* Prints the given exception frame, e.g. during fatal error handling. */
void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/* Default handler invoked for otherwise unhandled exceptions. */
void _AArch64_Exception_default( CPU_Exception_frame *frame );

/** Type that can store a 32-bit integer or a pointer. */
typedef uintptr_t CPU_Uint32ptr;
544
545#ifdef __cplusplus
546}
547#endif
548
549#endif /* ASM */
550
551/** @} */
552
553#endif /* _RTEMS_SCORE_CPU_H */
Note: See TracBrowser for help on using the repository browser.