source: rtems/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S @ d188e6e8

Last change on this file was d188e6e8, checked in by Kinsey Moore <kinsey.moore@…> on 12/08/20 at 15:11:34

cpukit/aarch64: Add explanation of exception flow

/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief Implementation of AArch64 interrupt exception handling
 *
 * This file implements the SP0 and SPx interrupt exception handlers to
 * deal with nested and non-nested interrupts.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

.globl  _AArch64_Exception_interrupt_no_nest
.globl  _AArch64_Exception_interrupt_nest

#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  #define SELF_CPU_CONTROL_GET_REG w19
#else
  #define SELF_CPU_CONTROL_GET_REG x19
#endif
#define SELF_CPU_CONTROL x19
#define NON_VOLATILE_SCRATCH x20

/* It's understood that CPU state is saved prior to and restored after this */
/*
 * NOTE: This function does not follow the AArch64 procedure call specification
 * because all relevant state is known to be saved in the interrupt context,
 * hence the blind usage of x19, x20, and x21
 */
.AArch64_Interrupt_Handler:
/* Get per-CPU control of current processor */
        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG

/* Increment interrupt nest and thread dispatch disable level */
        ldr     w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
        ldr     w3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        add     w2, w2, #1
        add     w3, w3, #1
        str     w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
        str     w3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

/* Save LR */
        mov     x21, LR

/* Call BSP dependent interrupt dispatcher */
        bl      bsp_interrupt_dispatch

/* Restore LR */
        mov     LR, x21

/* Load some per-CPU variables */
        ldr     w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        ldrb    w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
        ldr     w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
        ldr     w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

/* Decrement levels and determine thread dispatch state */
        eor     w1, w1, w0
        sub     w0, w0, #1
        orr     w1, w1, w0
        orr     w1, w1, w2
        sub     w3, w3, #1
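/*
 * At this point w0 held the pre-decrement thread dispatch disable level, w1
 * the dispatch-needed flag, and w2 the ISR dispatch disable flag. The value
 * combined into w1 is zero only when a dispatch is needed, the disable level
 * drops to zero, and ISR dispatch is not disabled; any nonzero result means
 * the thread dispatch must be skipped.
 */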
96
97/* Store thread dispatch disable and ISR nest levels */
98        str     w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
99        str     w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
100
101/* Return should_skip_thread_dispatch in x0 */
102        mov     x0, x1
103/* Return from handler */
104        ret
105
106/* NOTE: This function does not follow the AArch64 procedure call specification
107 * because all relevant state is known to be saved in the interrupt context,
108 * hence the blind usage of x19, x20, and x21 */
109.AArch64_Perform_Thread_Dispatch:
110/* Get per-CPU control of current processor */
111        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG
112
113/* Thread dispatch */
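/*
 * Save the current DAIF (interrupt mask) state in a non-volatile register so
 * it can be passed to _Thread_Do_dispatch() and restored once dispatching is
 * done
 */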
        mrs     NON_VOLATILE_SCRATCH, DAIF

.Ldo_thread_dispatch:

/* Set ISR dispatch disable and thread dispatch disable level to one */
        mov     w0, #1
        str     w0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
        str     w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

/* Save LR */
        mov     x21, LR

/* Call _Thread_Do_dispatch(), this function will enable interrupts */
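/*
 * The first argument is the per-CPU control, the second the interrupt level
 * used while dispatching; #0x80 is the PSTATE I bit, so clearing it in the
 * saved DAIF value means interrupts are enabled while the dispatch runs
 */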
        mov     x0, SELF_CPU_CONTROL
        mov     x1, NON_VOLATILE_SCRATCH
        mov     x2, #0x80
        bic     x1, x1, x2
        bl      _Thread_Do_dispatch

/* Restore LR */
        mov     LR, x21

/* Disable interrupts */
        msr     DAIF, NON_VOLATILE_SCRATCH

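/*
 * On SMP the executing thread may have been migrated to another processor
 * while dispatching, so the per-CPU control pointer must be fetched again
 */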
#ifdef RTEMS_SMP
        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG
#endif

/* Check if we have to do the thread dispatch again */
        ldrb    w0, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
        cmp     w0, #0
        bne     .Ldo_thread_dispatch

/* We are done with thread dispatching */
        mov     w0, #0
        str     w0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]

/* Return from thread dispatch */
        ret

/*
 * Must save corruptible registers and non-corruptible registers expected to be
 * used, x0 and lr expected to be already saved on the stack
 */
.macro  push_interrupt_context
/*
 * Push x1-x21 on to the stack, need 19-21 because they're modified without
 * obeying PCS
 */
        stp lr,         x1,     [sp, #-0x10]!
        stp x2,         x3,     [sp, #-0x10]!
        stp x4,         x5,     [sp, #-0x10]!
        stp x6,         x7,     [sp, #-0x10]!
        stp x8,         x9,     [sp, #-0x10]!
        stp x10,        x11,    [sp, #-0x10]!
        stp x12,        x13,    [sp, #-0x10]!
        stp x14,        x15,    [sp, #-0x10]!
        stp x16,        x17,    [sp, #-0x10]!
        stp x18,        x19,    [sp, #-0x10]!
        stp x20,        x21,    [sp, #-0x10]!
/*
 * Push q0-q31 on to the stack, need everything because parts of every register
 * are volatile/corruptible
 */
        stp q0,         q1,     [sp, #-0x20]!
        stp q2,         q3,     [sp, #-0x20]!
        stp q4,         q5,     [sp, #-0x20]!
        stp q6,         q7,     [sp, #-0x20]!
        stp q8,         q9,     [sp, #-0x20]!
        stp q10,        q11,    [sp, #-0x20]!
        stp q12,        q13,    [sp, #-0x20]!
        stp q14,        q15,    [sp, #-0x20]!
        stp q16,        q17,    [sp, #-0x20]!
        stp q18,        q19,    [sp, #-0x20]!
        stp q20,        q21,    [sp, #-0x20]!
        stp q22,        q23,    [sp, #-0x20]!
        stp q24,        q25,    [sp, #-0x20]!
        stp q26,        q27,    [sp, #-0x20]!
        stp q28,        q29,    [sp, #-0x20]!
        stp q30,        q31,    [sp, #-0x20]!
/* Get exception LR for PC and spsr */
        mrs x0, ELR_EL1
        mrs x1, SPSR_EL1
/* Push pc and spsr */
        stp x0,         x1,     [sp, #-0x10]!
/* Get fpcr and fpsr */
        mrs x0, FPSR
        mrs x1, FPCR
/* Push fpcr and fpsr */
        stp x0,         x1,     [sp, #-0x10]!
.endm

/* Must match inverse order of push_interrupt_context */
.macro pop_interrupt_context
/* Pop fpcr and fpsr */
        ldp x0,         x1,     [sp], #0x10
/* Restore fpcr and fpsr */
        msr FPCR, x1
        msr FPSR, x0
/* Pop pc and spsr */
        ldp x0,         x1,     [sp], #0x10
/* Restore exception LR for PC and spsr */
        msr SPSR_EL1, x1
        msr ELR_EL1, x0
/* Pop q0-q31 */
        ldp q30,        q31,    [sp], #0x20
        ldp q28,        q29,    [sp], #0x20
        ldp q26,        q27,    [sp], #0x20
        ldp q24,        q25,    [sp], #0x20
        ldp q22,        q23,    [sp], #0x20
        ldp q20,        q21,    [sp], #0x20
        ldp q18,        q19,    [sp], #0x20
        ldp q16,        q17,    [sp], #0x20
        ldp q14,        q15,    [sp], #0x20
        ldp q12,        q13,    [sp], #0x20
        ldp q10,        q11,    [sp], #0x20
        ldp q8,         q9,     [sp], #0x20
        ldp q6,         q7,     [sp], #0x20
        ldp q4,         q5,     [sp], #0x20
        ldp q2,         q3,     [sp], #0x20
        ldp q0,         q1,     [sp], #0x20
/* Pop x1-x21 */
        ldp x20,        x21,    [sp], #0x10
        ldp x18,        x19,    [sp], #0x10
        ldp x16,        x17,    [sp], #0x10
        ldp x14,        x15,    [sp], #0x10
        ldp x12,        x13,    [sp], #0x10
        ldp x10,        x11,    [sp], #0x10
        ldp x8,         x9,     [sp], #0x10
        ldp x6,         x7,     [sp], #0x10
        ldp x4,         x5,     [sp], #0x10
        ldp x2,         x3,     [sp], #0x10
        ldp lr,         x1,     [sp], #0x10
/* Must clear reservations here to ensure consistency with atomic operations */
        clrex
.endm

_AArch64_Exception_interrupt_nest:

/* Execution template:
Save volatile regs on interrupt stack
Execute irq handler
Restore volatile regs from interrupt stack
Return to embedded exception vector code
*/

/* Push interrupt context */
        push_interrupt_context

/* Jump into the handler, ignore return value */
        bl .AArch64_Interrupt_Handler

/*
 * SP should be where it was pre-handler (pointing at the exception frame)
 * or something has leaked stack space
 */
/* Pop interrupt context */
        pop_interrupt_context
/* Return to vector for final cleanup */
        ret

_AArch64_Exception_interrupt_no_nest:
/* Execution template:
Save volatile registers on thread stack (some x, all q, ELR, etc.)
Switch to interrupt stack
Execute interrupt handler
Switch to thread stack
Call thread dispatch
Restore volatile registers from thread stack
Return to embedded exception vector code
*/


/* Push interrupt context */
        push_interrupt_context

/*
 * Switch to the interrupt stack; interrupt dispatch may enable interrupts,
 * causing nesting
 */
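/*
 * SPSel = 0 selects the SP_EL0 stack pointer, which this port uses as the
 * interrupt stack while the handler runs
 */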
        msr     spsel, #0

/* Jump into the handler */
        bl .AArch64_Interrupt_Handler

/*
 * Switch back to the thread stack; interrupt dispatch should disable
 * interrupts before returning
 */
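/*
 * SPSel = 1 selects SP_ELx, the stack pointer that was in use when the
 * exception was taken (the interrupted thread's stack)
 */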
        msr     spsel, #1

/*
 * Check thread dispatch necessary, ISR dispatch disable and thread dispatch
 * disable level.
 */
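/*
 * x0 holds the should_skip_thread_dispatch value returned by
 * .AArch64_Interrupt_Handler: nonzero means a dispatch is either unnecessary
 * or currently disabled
 */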
        cmp     x0, #0
        bne     .Lno_need_thread_dispatch
        bl .AArch64_Perform_Thread_Dispatch

.Lno_need_thread_dispatch:
/*
 * SP should be where it was pre-handler (pointing at the exception frame)
 * or something has leaked stack space
 */
/* Pop interrupt context */
        pop_interrupt_context
/* Return to vector for final cleanup */
        ret