source: rtems/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S

Last change on this file was 02377c8, checked in by Kinsey Moore <kinsey.moore@…>, on 08/10/21 at 13:09:37

cpukit/aarch64: Add exception extensions support

This adds the function implementations necessary to add exception
extensions support to AArch64.

/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief Implementation of AArch64 interrupt exception handling
 *
 * This file implements the SP0 and SPx interrupt exception handlers to
 * deal with nested and non-nested interrupts.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

.globl  _AArch64_Exception_interrupt_no_nest
.globl  _AArch64_Exception_interrupt_nest
.globl  _CPU_Exception_dispatch_and_resume
.globl  _CPU_Exception_resume

#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  #ifdef RTEMS_SMP
    #define SELF_CPU_CONTROL_GET_REG x19
  #else
    #define SELF_CPU_CONTROL_GET_REG w19
  #endif
#else
  #define SELF_CPU_CONTROL_GET_REG x19
#endif
#define SELF_CPU_CONTROL x19
#define NON_VOLATILE_SCRATCH x20
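/*
 * Register usage throughout this file: x19 caches the per-CPU control
 * pointer, x20 preserves the saved DAIF value across calls, and x21
 * preserves LR across calls; see the notes below on the deliberate
 * departure from the procedure call standard.
 */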

/* It's understood that CPU state is saved prior to and restored after this */
/*
 * NOTE: This function does not follow the AArch64 procedure call specification
 * because all relevant state is known to be saved in the interrupt context,
 * hence the blind usage of x19, x20, and x21
 */
.AArch64_Interrupt_Handler:
/* Get per-CPU control of current processor */
        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG

/* Increment interrupt nest and thread dispatch disable level */
        ldr     w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
        ldr     w3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        add     w2, w2, #1
        add     w3, w3, #1
        str     w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
        str     w3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

/* Save LR */
        mov     x21, LR

/* Call BSP dependent interrupt dispatcher */
        bl      bsp_interrupt_dispatch

/* Restore LR */
        mov     LR, x21

/* Load some per-CPU variables */
        ldr     w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        ldrb    w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
        ldr     w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
        ldr     w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

/* Decrement levels and determine thread dispatch state */
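/*
 * The sequence below leaves w1 == 0 (a thread dispatch must be performed)
 * only if the dispatch needed flag is set, the thread dispatch disable level
 * drops to zero, and ISR dispatch is not disabled; any other combination
 * produces a non-zero w1, which skips the dispatch.
 */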
        eor     w1, w1, w0
        sub     w0, w0, #1
        orr     w1, w1, w0
        orr     w1, w1, w2
        sub     w3, w3, #1

/* Store thread dispatch disable and ISR nest levels */
        str     w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        str     w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

/* Return should_skip_thread_dispatch in x0 */
        mov     x0, x1
/* Return from handler */
        ret

/* NOTE: This function does not follow the AArch64 procedure call specification
 * because all relevant state is known to be saved in the interrupt context,
 * hence the blind usage of x19, x20, and x21 */
.AArch64_Perform_Thread_Dispatch:
/* Get per-CPU control of current processor */
        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG

/* Thread dispatch */
        mrs     NON_VOLATILE_SCRATCH, DAIF

.Ldo_thread_dispatch:

/* Set ISR dispatch disable and thread dispatch disable level to one */
        mov     w0, #1
        str     w0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
        str     w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

/* Save LR */
        mov     x21, LR

/* Call _Thread_Do_dispatch(), this function will enable interrupts */
        mov     x0, SELF_CPU_CONTROL
        mov     x1, NON_VOLATILE_SCRATCH
        mov     x2, #0x80
        bic     x1, x1, x2
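/*
 * x1 now holds the DAIF value saved above with the PSTATE I bit (0x80)
 * cleared, so _Thread_Do_dispatch() executes with IRQs unmasked.
 */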
        bl      _Thread_Do_dispatch

/* Restore LR */
        mov     LR, x21

/* Disable interrupts */
        msr     DAIF, NON_VOLATILE_SCRATCH

#ifdef RTEMS_SMP
        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG
#endif

/* Check if we have to do the thread dispatch again */
        ldrb    w0, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
        cmp     w0, #0
        bne     .Ldo_thread_dispatch

/* We are done with thread dispatching */
        mov     w0, #0
        str     w0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]

/* Return from thread dispatch */
        ret

/*
 * Must save corruptible registers and non-corruptible registers expected to be
 * used, x0 and lr expected to be already saved on the stack
 */
.macro  push_interrupt_context
/*
 * Push x1-x21 onto the stack; x19-x21 are needed because they are modified
 * without obeying the PCS
 */
        stp lr,         x1,     [sp, #-0x10]!
        stp x2,         x3,     [sp, #-0x10]!
        stp x4,         x5,     [sp, #-0x10]!
        stp x6,         x7,     [sp, #-0x10]!
        stp x8,         x9,     [sp, #-0x10]!
        stp x10,        x11,    [sp, #-0x10]!
        stp x12,        x13,    [sp, #-0x10]!
        stp x14,        x15,    [sp, #-0x10]!
        stp x16,        x17,    [sp, #-0x10]!
        stp x18,        x19,    [sp, #-0x10]!
        stp x20,        x21,    [sp, #-0x10]!
/*
 * Push q0-q31 onto the stack; all of them are needed because parts of every
 * register are volatile/corruptible
 */
        stp q0,         q1,     [sp, #-0x20]!
        stp q2,         q3,     [sp, #-0x20]!
        stp q4,         q5,     [sp, #-0x20]!
        stp q6,         q7,     [sp, #-0x20]!
        stp q8,         q9,     [sp, #-0x20]!
        stp q10,        q11,    [sp, #-0x20]!
        stp q12,        q13,    [sp, #-0x20]!
        stp q14,        q15,    [sp, #-0x20]!
        stp q16,        q17,    [sp, #-0x20]!
        stp q18,        q19,    [sp, #-0x20]!
        stp q20,        q21,    [sp, #-0x20]!
        stp q22,        q23,    [sp, #-0x20]!
        stp q24,        q25,    [sp, #-0x20]!
        stp q26,        q27,    [sp, #-0x20]!
        stp q28,        q29,    [sp, #-0x20]!
        stp q30,        q31,    [sp, #-0x20]!
/* Get exception LR for PC and spsr */
        mrs x0, ELR_EL1
        mrs x1, SPSR_EL1
/* Push pc and spsr */
        stp x0,         x1,     [sp, #-0x10]!
/* Get fpcr and fpsr */
        mrs x0, FPSR
        mrs x1, FPCR
/* Push fpcr and fpsr */
        stp x0,         x1,     [sp, #-0x10]!
.endm

/* Must match inverse order of push_interrupt_context */
.macro pop_interrupt_context
/* Pop fpcr and fpsr */
        ldp x0,         x1,     [sp], #0x10
/* Restore fpcr and fpsr */
        msr FPCR, x1
        msr FPSR, x0
/* Pop pc and spsr */
        ldp x0,         x1,     [sp], #0x10
/* Restore exception LR for PC and spsr */
        msr SPSR_EL1, x1
        msr ELR_EL1, x0
/* Pop q0-q31 */
        ldp q30,        q31,    [sp], #0x20
        ldp q28,        q29,    [sp], #0x20
        ldp q26,        q27,    [sp], #0x20
        ldp q24,        q25,    [sp], #0x20
        ldp q22,        q23,    [sp], #0x20
        ldp q20,        q21,    [sp], #0x20
        ldp q18,        q19,    [sp], #0x20
        ldp q16,        q17,    [sp], #0x20
        ldp q14,        q15,    [sp], #0x20
        ldp q12,        q13,    [sp], #0x20
        ldp q10,        q11,    [sp], #0x20
        ldp q8,         q9,     [sp], #0x20
        ldp q6,         q7,     [sp], #0x20
        ldp q4,         q5,     [sp], #0x20
        ldp q2,         q3,     [sp], #0x20
        ldp q0,         q1,     [sp], #0x20
/* Pop x1-x21 */
        ldp x20,        x21,    [sp], #0x10
        ldp x18,        x19,    [sp], #0x10
        ldp x16,        x17,    [sp], #0x10
        ldp x14,        x15,    [sp], #0x10
        ldp x12,        x13,    [sp], #0x10
        ldp x10,        x11,    [sp], #0x10
        ldp x8,         x9,     [sp], #0x10
        ldp x6,         x7,     [sp], #0x10
        ldp x4,         x5,     [sp], #0x10
        ldp x2,         x3,     [sp], #0x10
        ldp lr,         x1,     [sp], #0x10
/* Must clear reservations here to ensure consistency with atomic operations */
        clrex
.endm

_AArch64_Exception_interrupt_nest:

/* Execution template:
Save volatile regs on interrupt stack
Execute irq handler
Restore volatile regs from interrupt stack
Return to embedded exception vector code
*/

/* Push interrupt context */
        push_interrupt_context

/* Jump into the handler, ignore return value */
        bl .AArch64_Interrupt_Handler

/*
 * SP should be where it was pre-handler (pointing at the exception frame)
 * or something has leaked stack space
 */
/* Pop interrupt context */
        pop_interrupt_context
/* Return to vector for final cleanup */
        ret

_AArch64_Exception_interrupt_no_nest:
/* Execution template:
Save volatile registers on thread stack (some x, all q, ELR, etc.)
Switch to interrupt stack
Execute interrupt handler
Switch to thread stack
Call thread dispatch
Restore volatile registers from thread stack
Return to embedded exception vector code
*/

/* Push interrupt context */
        push_interrupt_context

/*
 * Switch to interrupt stack, interrupt dispatch may enable interrupts causing
 * nesting
 */
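/*
 * SPSel = 0 selects SP_EL0, which serves as the interrupt stack pointer
 * here; SPSel = 1 selects SP_ELx, which holds the thread stack pointer.
 */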
        msr     spsel, #0

/* Jump into the handler */
        bl .AArch64_Interrupt_Handler

/*
 * Switch back to thread stack, interrupt dispatch should disable interrupts
 * before returning
 */
        msr     spsel, #1

/*
 * Check whether a thread dispatch is necessary, taking the ISR dispatch
 * disable flag and the thread dispatch disable level into account.
 */
        cmp     x0, #0
        bne     .Lno_need_thread_dispatch
        bl .AArch64_Perform_Thread_Dispatch

.Lno_need_thread_dispatch:
/*
 * SP should be where it was pre-handler (pointing at the exception frame)
 * or something has leaked stack space
 */
/* Pop interrupt context */
        pop_interrupt_context
/* Return to vector for final cleanup */
        ret

/*
 * This function is expected to resume execution using the CPU_Exception_frame
 * provided in x0. This function does not adhere to the AAPCS64 calling
 * convention because all necessary state is contained within the exception
 * frame.
 */
_CPU_Exception_resume:
/* Reset stack pointer */
        mov     sp, x0

/* call CEF restore routine (doesn't restore lr) */
        bl .pop_exception_context

/* get lr from CEF */
        ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]

/* drop space reserved for CEF */
        add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE

/* switch to thread stack */
        msr spsel, #1
        eret

/*
 * This function is expected to undo dispatch disabling, perform dispatch, and
 * resume execution using the CPU_Exception_frame provided in x0. This function
 * does not adhere to the AAPCS64 calling convention because all necessary
 * state is contained within the exception frame.
 */
_CPU_Exception_dispatch_and_resume:
/* Get per-CPU control of current processor */
        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG

/* Reset stack pointer */
        mov     sp, x0

/* Check dispatch disable and perform dispatch if necessary */
/* Load some per-CPU variables */
        ldr     w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        ldrb    w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
        ldr     w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
        ldr     w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

/* Decrement levels and determine thread dispatch state */
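/*
 * Same skip-dispatch computation as in .AArch64_Interrupt_Handler above: w1
 * ends up zero only if a dispatch is needed, the thread dispatch disable
 * level drops to zero, and ISR dispatch is not disabled.
 */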
        eor     w1, w1, w0
        sub     w0, w0, #1
        orr     w1, w1, w0
        orr     w1, w1, w2
        sub     w3, w3, #1

/* Store thread dispatch disable and ISR nest levels */
        str     w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        str     w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

/* store should_skip_thread_dispatch in x22 */
        mov x22, x1

/*
 * It is now safe to assume that the source of the exception has been resolved.
 * Copy the exception frame to the thread stack to be compatible with thread
 * dispatch. This may arbitrarily clobber corruptible registers since all
 * important state is contained in the exception frame.
 *
 * No need to save current LR since this will never return to the caller.
 */
        bl .move_exception_frame_and_switch_to_thread_stack

/*
 * Check whether a thread dispatch is necessary, taking the ISR dispatch
 * disable flag and the thread dispatch disable level into account.
 */
        cmp     x22, #0
        bne     .Lno_need_thread_dispatch_resume
        bl .AArch64_Perform_Thread_Dispatch
.Lno_need_thread_dispatch_resume:
/* call CEF restore routine (doesn't restore lr) */
        bl .pop_exception_context

/* get lr from CEF */
        ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]

/* drop space reserved for CEF */
        add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
        eret

/* Assumes sp currently points to the EF on the exception stack and SPSel is 0 */
.move_exception_frame_and_switch_to_thread_stack:
        mov x1, sp                                                      /* Set x1 to the current exception frame */
        msr spsel, #1                                                   /* switch to thread stack */
        ldr x0, [x1, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]       /* Get thread SP from exception frame since it may have been updated */
        mov sp, x0
        sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE                       /* reserve space for CEF */
        mov x0, sp                                                      /* Set x0 to the new exception frame */
        mov x20, lr                                                     /* Save LR */
        bl _AArch64_Exception_frame_copy                                /* Copy exception frame to reserved thread stack space */
        mov lr, x20                                                     /* Restore LR */
        msr spsel, #0                                                   /* switch to exception stack */
        add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE                       /* release space for CEF on exception stack */
        msr spsel, #1                                                   /* switch to thread stack */
        ret

/*
 * Apply the exception frame to the current register status, SP points to the EF
 */
.pop_exception_context:
/* Pop daif and spsr */
        ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
/* Restore daif and spsr */
        msr DAIF, x2
        msr SPSR_EL1, x3
/* Pop FAR and ESR */
        ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
/* Restore ESR and FAR */
        msr ESR_EL1, x2
        msr FAR_EL1, x3
/* Pop fpcr and fpsr */
        ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
/* Restore fpcr and fpsr */
        msr FPSR, x2
        msr FPCR, x3
/* Pop VFP registers */
        ldp q0,  q1,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
        ldp q2,  q3,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
        ldp q4,  q5,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
        ldp q6,  q7,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
        ldp q8,  q9,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
        ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
        ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
        ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
        ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
        ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
        ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
        ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
        ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
        ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
        ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
        ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
/* Pop x0-x29(fp) */
        ldp x2,  x3,  [sp, #0x10]
        ldp x4,  x5,  [sp, #0x20]
        ldp x6,  x7,  [sp, #0x30]
        ldp x8,  x9,  [sp, #0x40]
        ldp x10, x11, [sp, #0x50]
        ldp x12, x13, [sp, #0x60]
        ldp x14, x15, [sp, #0x70]
        ldp x16, x17, [sp, #0x80]
        ldp x18, x19, [sp, #0x90]
        ldp x20, x21, [sp, #0xa0]
        ldp x22, x23, [sp, #0xb0]
        ldp x24, x25, [sp, #0xc0]
        ldp x26, x27, [sp, #0xd0]
        ldp x28, x29, [sp, #0xe0]
/* Pop ELR, SP already popped */
        ldr x1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET + 0x8)]
/* Restore exception LR */
        msr ELR_EL1, x1
        ldp x0,  x1,  [sp, #0x00]

/* We must clear reservations to ensure consistency with atomic operations */
        clrex

        ret