1 | /* SPDX-License-Identifier: BSD-2-Clause */ |
---|
2 | |
---|
3 | /** |
---|
4 | * @file |
---|
5 | * |
---|
6 | * @ingroup RTEMSScoreCPUAArch64 |
---|
7 | * |
---|
8 | * @brief AArch64 architecture support implementation. |
---|
9 | */ |
---|
10 | |
---|
11 | /* |
---|
12 | * Copyright (C) 2020 On-Line Applications Research Corporation (OAR) |
---|
13 | * Written by Kinsey Moore <kinsey.moore@oarcorp.com> |
---|
14 | * |
---|
15 | * Redistribution and use in source and binary forms, with or without |
---|
16 | * modification, are permitted provided that the following conditions |
---|
17 | * are met: |
---|
18 | * 1. Redistributions of source code must retain the above copyright |
---|
19 | * notice, this list of conditions and the following disclaimer. |
---|
20 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
21 | * notice, this list of conditions and the following disclaimer in the |
---|
22 | * documentation and/or other materials provided with the distribution. |
---|
23 | * |
---|
24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
---|
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
---|
28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
---|
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
---|
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
---|
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
---|
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
---|
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
---|
34 | * POSSIBILITY OF SUCH DAMAGE. |
---|
35 | */ |
---|
36 | |
---|
37 | #ifdef HAVE_CONFIG_H |
---|
38 | #include "config.h" |
---|
39 | #endif |
---|
40 | |
---|
41 | #include <rtems/score/cpuimpl.h> |
---|
42 | #include <rtems/score/thread.h> |
---|
43 | #include <rtems/score/tls.h> |
---|
44 | |
---|
/*
 * Compile-time layout checks.  The AARCH64_*_OFFSET / *_SIZE constants are
 * presumably consumed by assembly-language context-switch and exception
 * code elsewhere in this port — TODO confirm against the assembly sources.
 * These assertions guarantee the assembler-visible constants stay in sync
 * with the C-level layout of Context_Control and CPU_Exception_frame.
 */

#ifdef AARCH64_MULTILIB_VFP
/* d8 is the first callee-saved FP register stored in the context */
RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, register_d8 )
    == AARCH64_CONTEXT_CONTROL_D8_OFFSET,
  AARCH64_CONTEXT_CONTROL_D8_OFFSET
);
#endif

RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, thread_id )
    == AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET,
  AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, isr_dispatch_disable )
    == AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE,
  AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE
);

#ifdef RTEMS_SMP
/* is_executing is only part of the context on SMP configurations */
RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, is_executing )
    == AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET,
  AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
);
#endif

RTEMS_STATIC_ASSERT(
  sizeof( CPU_Exception_frame ) == AARCH64_EXCEPTION_FRAME_SIZE,
  AARCH64_EXCEPTION_FRAME_SIZE
);

/* The exception frame is pushed on the stack and must keep it aligned */
RTEMS_STATIC_ASSERT(
  sizeof( CPU_Exception_frame ) % CPU_STACK_ALIGNMENT == 0,
  CPU_Exception_frame_alignment
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_sp )
    == AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_lr )
    == AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_daif )
    == AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_syndrome )
    == AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, vector )
    == AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_fpsr )
    == AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_q0 )
    == AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET
);
---|
124 | |
---|
125 | |
---|
126 | void _CPU_Context_Initialize( |
---|
127 | Context_Control *the_context, |
---|
128 | void *stack_area_begin, |
---|
129 | size_t stack_area_size, |
---|
130 | uint64_t new_level, |
---|
131 | void (*entry_point)( void ), |
---|
132 | bool is_fp, |
---|
133 | void *tls_area |
---|
134 | ) |
---|
135 | { |
---|
136 | (void) new_level; |
---|
137 | |
---|
138 | the_context->register_sp = (uintptr_t) stack_area_begin + stack_area_size; |
---|
139 | the_context->register_lr = (uintptr_t) entry_point; |
---|
140 | the_context->isr_dispatch_disable = 0; |
---|
141 | |
---|
142 | the_context->thread_id = (uintptr_t) tls_area; |
---|
143 | |
---|
144 | if ( tls_area != NULL ) { |
---|
145 | the_context->thread_id = (uintptr_t) _TLS_Initialize_area( tls_area ); |
---|
146 | } |
---|
147 | } |
---|
148 | |
---|
/**
 * @brief Sets the interrupt level by masking or unmasking IRQs.
 *
 * @param level Non-zero to disable interrupts (set PSTATE.I), zero to
 *   enable them (clear PSTATE.I).
 *
 * DAIFSet/DAIFClr take an immediate bit mask (#0x2 selects the I bit), so
 * the level value itself is never needed as a register operand.  The
 * "memory" clobber keeps the compiler from moving memory accesses across
 * the interrupt state change.
 */
void _CPU_ISR_Set_level( uint32_t level )
{
  /* Set the mask bit if interrupts are disabled */
  if ( level != 0 ) {
    __asm__ volatile ( "msr DAIFSet, #0x2\n" : : : "memory" );
  } else {
    __asm__ volatile ( "msr DAIFClr, #0x2\n" : : : "memory" );
  }
}
---|
164 | |
---|
165 | uint32_t _CPU_ISR_Get_level( void ) |
---|
166 | { |
---|
167 | uint64_t level; |
---|
168 | |
---|
169 | __asm__ volatile ( |
---|
170 | "mrs %[level], DAIF\n" |
---|
171 | : [level] "=&r" (level) |
---|
172 | ); |
---|
173 | |
---|
174 | return ( level & AARCH64_PSTATE_I ) != 0; |
---|
175 | } |
---|
176 | |
---|
/**
 * @brief Performs CPU-specific initialization.
 *
 * Nothing is required for this port; the hook exists because the port
 * interface demands it.
 */
void _CPU_Initialize( void )
{
  /* Intentionally empty */
}
---|
181 | |
---|
/**
 * @brief Halts the processor after a fatal error; does not return.
 *
 * @param source Fatal source (unused by this implementation).
 * @param error Fatal error code; copied into x0, presumably so it is
 *   visible in the register state when the halted CPU is examined — TODO
 *   confirm against the debugger/monitor conventions of this port.
 */
void _CPU_Fatal_halt( uint32_t source, CPU_Uint32ptr error )
{
  ISR_Level level;

  /* Mask interrupts so the spin loop below cannot be preempted */
  _CPU_ISR_Disable( level );
  (void) level;
  /*
   * Move the error code into x0.  Listing x0 as a clobber forces the
   * register allocator to place operand %0 in a different register, so the
   * mov is well-defined even though the operand and x0 would otherwise
   * overlap.
   */
  __asm__ volatile ("mov x0, %0\n"
    : "=r" (error)
    : "0" (error)
    : "x0" );
  while (1);
}
---|