source: rtems/bsps/aarch64/shared/start/start.S @ 6c36cb7a

Last change on this file since 6c36cb7a was 6c36cb7a, checked in by Gedare Bloom <gedare@…>, on 01/06/22 at 20:28:34

aarch64: always boot into EL1NS

Always start the executive in Exception Level 1, Non-Secure mode.
If we boot in EL3 Secure with GICv3 then we have to initialize
the distributor and redistributor to set up G1NS interrupts
early in the boot sequence before stepping down from EL3S to EL1NS.

Now there is no need to distinguish between secure and non-secure
world execution after the primary core boots, so get rid of the
AARCH64_IS_NONSECURE configuration option.

  • Property mode set to 100644
File size: 7.3 KB
Line 
/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSBSPsAArch64Shared
 *
 * @brief Boot and system start code.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
36
#include <rtems/asm.h>
#include <rtems/score/percpu.h>

#include <bspopts.h>

  /* Global symbols */
  .globl _start
  .section ".bsp_start_text", "ax"

/* Start entry */

_start:

  /*
   * We do not save the context since we do not return to the boot
   * loader but preserve x1 and x2 to allow access to bootloader parameters
   */
#ifndef BSP_START_NEEDS_REGISTER_INITIALIZATION
  mov x5, x1    /* machine type number or ~0 for DT boot */
  mov x6, x2    /* physical address of ATAGs or DTB */
#else /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
  /*
   * This block is dead code. No aarch64 targets require this. It might be
   * needed for hardware simulations or in future processor variants with
   * lock-step cores.
   */
  mov x0, XZR
  mov x1, XZR
  mov x2, XZR
  mov x3, XZR
  mov x4, XZR
  mov x5, XZR
  mov x6, XZR
  mov x7, XZR
  mov x8, XZR
  mov x9, XZR
  mov x10, XZR
  mov x11, XZR
  mov x12, XZR
  mov x13, XZR
  mov x14, XZR
  mov x15, XZR
  mov x16, XZR
  mov x17, XZR
  mov x18, XZR
  mov x19, XZR
  mov x20, XZR
  mov x21, XZR
  mov x22, XZR
  mov x23, XZR
  mov x24, XZR
  mov x25, XZR
  mov x26, XZR
  mov x27, XZR
  mov x28, XZR
  mov x29, XZR
  mov x30, XZR
#ifdef AARCH64_MULTILIB_VFP
  /*
   * CPTR_EL3/CPTR_EL2 are system registers: they must be written with
   * MSR, a plain MOV does not assemble for them.
   */
  msr CPTR_EL3, XZR
  msr CPTR_EL2, XZR
  /*
   * FP/SIMD registers cannot be the destination of MOV from XZR; FMOV
   * transfers a general-purpose register into a dN register.
   */
  fmov d0, XZR
  fmov d1, XZR
  fmov d2, XZR
  fmov d3, XZR
  fmov d4, XZR
  fmov d5, XZR
  fmov d6, XZR
  fmov d7, XZR
  fmov d8, XZR
  fmov d9, XZR
  fmov d10, XZR
  fmov d11, XZR
  fmov d12, XZR
  fmov d13, XZR
  fmov d14, XZR
  fmov d15, XZR
  fmov d16, XZR
  fmov d17, XZR
  fmov d18, XZR
  fmov d19, XZR
  fmov d20, XZR
  fmov d21, XZR
  fmov d22, XZR
  fmov d23, XZR
  fmov d24, XZR
  fmov d25, XZR
  fmov d26, XZR
  fmov d27, XZR
  fmov d28, XZR
  fmov d29, XZR
  fmov d30, XZR
  fmov d31, XZR
#endif /* AARCH64_MULTILIB_VFP */
#endif /* BSP_START_NEEDS_REGISTER_INITIALIZATION */

  /* Initialize SCTLR_EL1 */
  mov x0, XZR
#if defined(RTEMS_DEBUG)
  /* Enable Stack alignment checking */
  orr x0, x0, #(1<<3)
#endif
  msr SCTLR_EL1, x0

  /*
   * Dispatch on the boot exception level: CurrentEL holds the EL in
   * bits [3:2]. An EL3 boot falls through to _el3_start below.
   */
  mrs x0, CurrentEL
  cmp x0, #(1<<2)
  b.eq _el1_start
  cmp x0, #(2<<2)
  b.eq _el2_start
_el3_start:
  /*
   * We booted in EL3 (Secure world). The GIC must be initialized for
   * G1NS interrupts before leaving the Secure world, so run the start
   * hook from an early EL3 stack context here. This only works for the
   * primary boot core: real hardware usually runs on top of trusted
   * firmware and never boots in EL3, while Qemu starts the primary
   * core in EL3 and brings secondary cores up in EL1NS as expected.
   */
  #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
    ldr w1, =_ISR_Stack_size
    ldr w2, =_ISR_Stack_area_begin
  #else
    ldr x1, =_ISR_Stack_size
    ldr x2, =_ISR_Stack_area_begin
  #endif
  add x3, x1, x2
  /* Use SP0 for the temporary early init stack while in EL3 */
  msr spsel, #0
  mov sp, x3

  /*
   * Run start hook 0. No exception handling has been set up at this
   * point, so the hook must not fault.
   */
  bl bsp_start_hook_0

  /* Step down from EL3 to EL2 */

  /* Clear the EL2 hypervisor and system control registers */
  msr HCR_EL2, XZR
  msr SCTLR_EL2, XZR
  /*
   * SCR_EL3: RW (bit 10) selects AArch64 for EL2, NS (bit 0) makes the
   * lower exception levels Non-secure.
   */
  mrs x0, SCR_EL3
  orr x0, x0, #(1<<10)
  orr x0, x0, #1
  msr SCR_EL3, x0

  /* SPSR_EL3 = EL2h so eret lands in EL2 using SP_EL2 */
  mov x0, #0b01001
  msr SPSR_EL3, x0

  /* Exception return into _el2_start */
  adr x0, _el2_start
  msr ELR_EL3, x0
  eret
_el2_start:
  /* Step down from EL2 to EL1 */

  /*
   * HCR_EL2: RW (bit 31) selects AArch64 for EL1; clearing the
   * TID0..TID3 bits (15-18) stops ID register accesses from being
   * trapped to EL2.
   */
  mrs x0, HCR_EL2
  orr x0, x0, #(1<<31)
  bic x0, x0, #(1<<15)
  bic x0, x0, #(1<<16)
  bic x0, x0, #(1<<17)
  bic x0, x0, #(1<<18)
  msr HCR_EL2, x0

  /* SPSR_EL2 = EL1h so eret lands in EL1 using SP_EL1 */
  mov x0, #0b00101
  msr SPSR_EL2, x0

  /* Exception return into _el1_start */
  adr x0, _el1_start
  msr ELR_EL2, x0
  eret
_el1_start:

#ifdef RTEMS_SMP
  /* x7 = current processor index (Aff0 field of MPIDR_EL1) */
  mrs x7, mpidr_el1
  and x7, x7, #0xff
#endif

#ifdef RTEMS_SMP
  /*
   * Point TPIDR_EL1 (the EL1-only thread ID register) at this
   * processor's per-CPU control structure.
   */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  ldr w1, =_Per_CPU_Information
#else
  ldr x1, =_Per_CPU_Information
#endif
  add x1, x1, x7, lsl #PER_CPU_CONTROL_SIZE_LOG2
  msr TPIDR_EL1, x1

#endif

  /* x3 = end of the current processor's interrupt stack area */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  ldr w1, =_ISR_Stack_size
#else
  ldr x1, =_ISR_Stack_size
#endif
#ifdef RTEMS_SMP
  /* Per-core stacks are contiguous: core N's area ends at (N + 1) * size */
  add x3, x7, #1
  mul x1, x1, x3
#endif
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  ldr w2, =_ISR_Stack_area_begin
#else
  ldr x2, =_ISR_Stack_area_begin
#endif
  add x3, x1, x2

  /* Mask IRQ (I) and debug (D) exceptions: DAIFSet #0xa sets bits D and I */
  msr DAIFSet, #0xa

#ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
  mov x8, XZR
  mov x9, XZR
  mov x10, XZR
  mov x11, XZR
  mov x12, XZR
  mov x13, XZR
  mov x14, XZR
  mov x15, XZR
#endif

  /*
   * SPx is the stack pointer of the current exception level. Normal
   * RTEMS operation on AArch64 runs on SPx at EL1; exception handling
   * (synchronous errors, IRQ, FIQ, System Errors) runs on SP0. The
   * exception stack is carved off the top of the interrupt stack area.
   */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  ldr w1, =bsp_stack_exception_size
#else
  ldr x1, =bsp_stack_exception_size
#endif
  /* SP0 = exception stack, at the very top of the area */
  msr spsel, #0
  mov sp, x3
  /* Back to SPx for normal operation */
  msr spsel, #1
  sub x3, x3, x1

  /* SP1 = normal operation stack, below the exception stack */
  mov sp, x3

  /* Execution continues in EL1 */

#ifdef AARCH64_MULTILIB_VFP
#ifdef AARCH64_MULTILIB_HAS_CPACR
  /* Grant EL1 access to the FP/SIMD unit (CP10) via CPACR_EL1 bit 20 */
  mrs x0, CPACR_EL1
  orr x0, x0, #(1 << 20)
  msr CPACR_EL1, x0
  isb
#endif

  /* No separate FPU enable step is needed on AArch64 */

#endif /* AARCH64_MULTILIB_VFP */

  /* Run start hook 1 */
  bl bsp_start_hook_1

  /* Enter the generic RTEMS startup; x0 = 0 is the boot_card argument */
  mov x0, #0
  bl boot_card
Note: See TracBrowser for help on using the repository browser.