source: rtems/cpukit/score/cpu/aarch64/cpu_asm.S

Last change on this file was 32f0f11, checked in by Sebastian Huber <sebastian.huber@…>, on 03/08/22 at 15:56:49

SMP: Fix start multitasking for some targets

The previous SMP multitasking start assumed that the initial heir thread of a
processor starts execution in _Thread_Handler(). _Thread_Handler() sets the
interrupt state explicitly via _ISR_Set_level() before it calls the thread
entry. Under certain timing conditions, however, a processor may perform its
initial context switch to a thread that is already executing its thread body
(see smptests/smpstart01). In this case, interrupts remain disabled after the
context switch on targets which do not save/restore the interrupt state during
a context switch (aarch64, arm, and riscv).

Close #4627.
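
To make the failure mode concrete, here is a minimal C sketch of the pattern
described above (illustrative only; the real _Thread_Handler() lives in
cpukit/score/src/threadhandler.c and the helper names here are hypothetical):

    typedef void ( *Thread_entry_sketch )( void * );

    /* Stand-in for _ISR_Set_level(); level 0 unmasks interrupts. */
    static void isr_set_level_sketch( int level )
    {
      (void) level;
    }

    static void thread_handler_sketch( Thread_entry_sketch entry, void *arg )
    {
      isr_set_level_sketch( 0 ); /* interrupts are unmasked here, once */
      ( *entry )( arg );         /* thread body; may block and be resumed */
    }

A processor whose initial context switch lands in a thread that is already
inside entry() resumes past the one-time isr_set_level_sketch( 0 ) call, so
nothing re-enables interrupts on ports that do not carry the interrupt state
in the context; _AArch64_Start_multitasking below therefore unmasks IRQs
itself before joining the context-switch restore path.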

/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief AArch64 architecture context switch implementation.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

        .text

/*
 *  void _CPU_Context_switch( run_context, heir_context )
 *  void _CPU_Context_switch_no_return( run_context, heir_context )
 *
 *  This routine performs a normal non-FP context switch.
 *
 *  X0 = run_context    X1 = heir_context
 *
 *  This function copies the current registers to where x0 points, then
 *  restores the ones from where x1 points.
 */
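/*
 *  The fixed offsets below mirror the leading part of Context_Control:
 *  x19/x20 at 0x00 up to x27/x28 at 0x40, fp/lr at 0x50, sp at 0x60.
 *  All other fields are reached via the named AARCH64_CONTEXT_CONTROL_*
 *  offsets, so this note is a summary derived from this file, not the
 *  authoritative layout (see cpu.h).
 */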

DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
        .globl  _CPU_Context_switch_no_return
        .set    _CPU_Context_switch_no_return, _CPU_Context_switch
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
/* Sanitize inputs for ILP32 ABI */
        mov w0, w0
        mov w1, w1
  #ifdef RTEMS_SMP
    #define reg_2 x2
  #else
    #define reg_2 w2
  #endif
#else
#define reg_2 x2
#endif
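/*
 * reg_2 picks the width of the register that receives the per-CPU
 * control pointer: under ILP32 a 32-bit w register normally suffices,
 * but the SMP variant of GET_SELF_CPU_CONTROL presumably needs the
 * full 64-bit x2 (see <rtems/asm.h> for the actual macro definition).
 */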

/* Start saving context */
        GET_SELF_CPU_CONTROL    reg_2
        ldr     w3, [x2, #PER_CPU_ISR_DISPATCH_DISABLE]

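        /*
         * Only the AAPCS64 callee-saved registers (x19-x28, fp, lr, sp
         * and, in the VFP block, d8-d15) need to be saved here: the
         * caller-saved registers were already preserved by the compiler
         * at this explicit call site.
         */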
        stp x19, x20, [x0]
        stp x21, x22, [x0, #0x10]
        stp x23, x24, [x0, #0x20]
        stp x25, x26, [x0, #0x30]
        stp x27, x28, [x0, #0x40]
        stp fp,  lr,  [x0, #0x50]
        mov x4,  sp
        str x4,  [x0, #0x60]

#ifdef AARCH64_MULTILIB_VFP
        add     x5, x0, #AARCH64_CONTEXT_CONTROL_D8_OFFSET
        stp d8,  d9,  [x5]
        stp d10, d11, [x5, #0x10]
        stp d12, d13, [x5, #0x20]
        stp d14, d15, [x5, #0x30]
#endif

        str     x3, [x0, #AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE]

#ifdef RTEMS_SMP
        /*
         * The executing thread no longer executes on this processor.  Switch
         * the stack to the temporary interrupt stack of this processor.  Mark
         * the context of the executing thread as not executing.
         */
        dmb     SY
        add     sp, x2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE)
        mov     x3, #0
        strb    w3, [x0, #AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]

.L_check_is_executing:

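        /*
         * ldaxrb/stlxrb below form a load-acquire/store-release exclusive
         * pair: the store succeeds (status 0 written to w5) only if no
         * other observer has written the byte since the exclusive load;
         * otherwise the heir may have changed and must be re-read.
         */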
        /* Check the is executing indicator of the heir context */
        add     x3, x1, #AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
        ldaxrb  w4, [x3]
        cmp     x4, #0
        bne     .L_get_potential_new_heir

        /* Try to update the is executing indicator of the heir context */
        mov     x4, #1
        stlxrb  w5, w4, [x3]
        cmp     x5, #0
        bne     .L_get_potential_new_heir
        dmb     SY
#endif

/* Start restoring context */
.L_restore:
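        /*
         * In uniprocessor builds, drop any dangling exclusive monitor
         * reservation so that a load/store-exclusive sequence interrupted
         * by this switch cannot falsely succeed in the heir thread.
         */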
#if !defined(RTEMS_SMP) && defined(AARCH64_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE)
        clrex
#endif

        ldr     x3, [x1, #AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET]

        ldr     x4, [x1, #AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE]

#ifdef AARCH64_MULTILIB_VFP
        add     x5, x1, #AARCH64_CONTEXT_CONTROL_D8_OFFSET
        ldp d8,  d9,  [x5]
        ldp d10, d11, [x5, #0x10]
        ldp d12, d13, [x5, #0x20]
        ldp d14, d15, [x5, #0x30]
#endif

        msr     TPIDR_EL0, x3

        str     w4, [x2, #PER_CPU_ISR_DISPATCH_DISABLE]

        ldp x19, x20, [x1]
        ldp x21, x22, [x1, #0x10]
        ldp x23, x24, [x1, #0x20]
        ldp x25, x26, [x1, #0x30]
        ldp x27, x28, [x1, #0x40]
        ldp fp,  lr,  [x1, #0x50]
        ldr x4,  [x1, #0x60]
        mov sp,  x4
        ret

/*
 *  void _CPU_Context_restore( new_context )
 *
 *  This function restores the registers from where x0 points.
 *  It must match the register layout used by _CPU_Context_switch().
 */
DEFINE_FUNCTION_AARCH64(_CPU_Context_restore)
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
/* Sanitize input for ILP32 ABI */
        mov w0, w0
#endif

        mov     x1, x0
        GET_SELF_CPU_CONTROL    reg_2
        b       .L_restore

#ifdef RTEMS_SMP
.L_get_potential_new_heir:

        /* We may have a new heir */

        /* Read the executing and heir */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
        ldr     w4, [x2, #PER_CPU_OFFSET_EXECUTING]
        ldr     w5, [x2, #PER_CPU_OFFSET_HEIR]
#else
        ldr     x4, [x2, #PER_CPU_OFFSET_EXECUTING]
        ldr     x5, [x2, #PER_CPU_OFFSET_HEIR]
#endif

        /*
         * Update the executing only if necessary to avoid cache line
         * monopolization.
         */
        cmp     x4, x5
        beq     .L_check_is_executing

        /*
         * Calculate the heir context pointer: x1 holds the context of
         * the thread recorded as executing and x4 its TCB, so x1 - x4 is
         * the offset of the context within the TCB; applying that offset
         * to the new heir TCB in x5 yields the heir context pointer.
         */
        sub     x4, x1, x4
        add     x1, x5, x4

        /* Update the executing */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
        str     w5, [x2, #PER_CPU_OFFSET_EXECUTING]
#else
        str     x5, [x2, #PER_CPU_OFFSET_EXECUTING]
#endif

        b       .L_check_is_executing

DEFINE_FUNCTION_AARCH64(_AArch64_Start_multitasking)
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
        /* Sanitize input for ILP32 ABI */
        mov     w0, w0
#endif

        mov     x1, x0
        GET_SELF_CPU_CONTROL    reg_2

        /* Switch the stack to the temporary interrupt stack of this processor */
        add     sp, x2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE)

        /* Enable interrupts (DAIFClr #0x2 clears the DAIF I bit, unmasking IRQs) */
        msr     DAIFClr, #0x2

        b       .L_check_is_executing
#endif
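
For context, _AArch64_Start_multitasking above is presumably reached from C
through the port's start-multitasking hook, along the lines of the following
sketch (an assumption based on the commit message and the usual RTEMS port
pattern, not a copy of the actual declarations in cpu.h):

    /* Sketch of the assumed C-side wiring for the SMP start. */
    void _AArch64_Start_multitasking( Context_Control *heir )
      RTEMS_NO_RETURN;

    #define _CPU_Start_multitasking( _heir ) \
      _AArch64_Start_multitasking( _heir )

Enabling interrupts before branching into the context-switch restore path is
what closes the window described in the commit message: even if the heir is
already executing its thread body, the processor enters it with IRQs
unmasked.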