source: rtems/cpukit/score/cpu/arm/arm_exc_interrupt.S @ 610909f

Last change on this file since 610909f was 5c7bfcf, checked in by Sebastian Huber <sebastian.huber@…>, on 11/11/15 at 10:49:45

Fix interrupt epilogue for ARMv7-AR and PowerPC

  • Property mode set to 100644
File size: 6.2 KB
/**
 * @file
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM interrupt exception prologue and epilogue.
 */

/*
 * Copyright (c) 2009-2014 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

/*
 * The upper EXCHANGE_SIZE bytes of the INT stack area are used for data
 * exchange between INT and SVC mode.  Below this is the actual INT stack.
 * The exchange area is only accessed if INT is disabled.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

#ifdef ARM_MULTILIB_ARCH_V4

#define EXCHANGE_LR r4
#define EXCHANGE_SPSR r5
#define EXCHANGE_CPSR r6
#define EXCHANGE_INT_SP r8

#define EXCHANGE_LIST {EXCHANGE_LR, EXCHANGE_SPSR, EXCHANGE_CPSR, EXCHANGE_INT_SP}
#define EXCHANGE_SIZE 16
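
/*
 * The exchange area therefore holds four 32-bit registers (EXCHANGE_SIZE is
 * 4 * 4 bytes).  A sketch of the intended layout at the top of the INT stack
 * area:
 *
 *   top of INT stack area ->  +--------------------------+
 *                             | exchange area            | EXCHANGE_SIZE bytes
 *          EXCHANGE_INT_SP -> +--------------------------+
 *                             | actual INT stack         |
 *                             | (grows to lower addrs)   |
 */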

#define SELF_CPU_CONTROL r7
#define SP_OF_INTERRUPTED_CONTEXT r9

#define CONTEXT_LIST {r0, r1, r2, r3, EXCHANGE_LR, EXCHANGE_SPSR, SELF_CPU_CONTROL, r12}
#define CONTEXT_SIZE 32
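
/*
 * CONTEXT_LIST names eight 32-bit registers, so CONTEXT_SIZE is 8 * 4 = 32
 * bytes.  Keeping every pushed frame a multiple of 8 bytes preserves the
 * 8-byte stack alignment required by the AAPCS.
 */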

.arm
.globl _ARMV4_Exception_interrupt
_ARMV4_Exception_interrupt:

        /* Save exchange registers to exchange area */
        stmdb   sp, EXCHANGE_LIST

        /* Set exchange registers */
        mov     EXCHANGE_LR, lr
        mrs     EXCHANGE_SPSR, SPSR
        mrs     EXCHANGE_CPSR, CPSR
        sub     EXCHANGE_INT_SP, sp, #EXCHANGE_SIZE

        /* Switch to SVC mode */
        orr     EXCHANGE_CPSR, EXCHANGE_CPSR, #0x1
        msr     CPSR_c, EXCHANGE_CPSR
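        /*
         * In the ARM mode encoding IRQ mode is 0b10010 (0x12) and SVC mode is
         * 0b10011 (0x13), so setting bit 0 of the mode field above is enough
         * to move from the interrupt mode to SVC mode.
         */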

        /*
         * Save context.  We save the link register separately because it has
         * to be restored in SVC mode.  The other registers can be restored in
         * INT mode.  Ensure that the stack remains 8-byte aligned.  The
         * register pushed only for the sake of this alignment is used to hold
         * the stack pointer of the interrupted context.
         */
        stmdb   sp!, CONTEXT_LIST
        stmdb   sp!, {SP_OF_INTERRUPTED_CONTEXT, lr}
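        /*
         * Together these two stores push 8 + 2 = 10 words (40 bytes), a
         * multiple of 8, so the 8-byte alignment is indeed preserved.
         */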

#ifdef ARM_MULTILIB_VFP
        /* Save VFP context */
        vmrs    r0, FPSCR
        vstmdb  sp!, {d0-d7}
#ifdef ARM_MULTILIB_VFP_D32
        vstmdb  sp!, {d16-d31}
#endif
        stmdb   sp!, {r0, r1}
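        /*
         * Only r0 (the saved FPSCR) matters here; r1 is pushed solely to keep
         * the stack 8-byte aligned and its value is ignored on restore.
         */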
#endif /* ARM_MULTILIB_VFP */

        /* Get per-CPU control of current processor */
        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL, r1

        /* Remember INT stack pointer */
        mov     r1, EXCHANGE_INT_SP

        /* Restore exchange registers from exchange area */
        ldmia   r1, EXCHANGE_LIST
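        /*
         * r4, r5, r6 and r8 now hold the values the interrupted context had
         * on entry again.  The return address and SPSR copies remain
         * available, since they were saved to the stack as part of
         * CONTEXT_LIST above.
         */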

        /* Get interrupt nest level */
        ldr     r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

        /* Switch stack if necessary and save original stack pointer */
        mov     SP_OF_INTERRUPTED_CONTEXT, sp
        cmp     r2, #0
        moveq   sp, r1
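        /*
         * Only the outermost interrupt (nest level zero) switches to the INT
         * stack just below the exchange area; a nested interrupt is already
         * running on the INT stack and keeps the current stack pointer.
         */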

        /* Switch to THUMB instructions if necessary */
        SWITCH_FROM_ARM_TO_THUMB        r1

        /* Increment interrupt nest and thread dispatch disable level */
        ldr     r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        add     r2, #1
        add     r3, #1
        str     r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
        str     r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

#ifdef RTEMS_PROFILING
        cmp     r2, #1
        bne     profiling_entry_done
        bl      _CPU_Counter_read
        push    {r0, r1}
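        /*
         * r0 holds the counter value read at interrupt entry; r1 is pushed
         * only to keep the stack 8-byte aligned.
         */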
profiling_entry_done:
#endif

        /* Call BSP dependent interrupt dispatcher */
        bl      bsp_interrupt_dispatch

        /* Decrement interrupt nest and thread dispatch disable level */
        ldr     r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
        ldr     r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        sub     r2, #1
        sub     r3, #1
        str     r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
        str     r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

#ifdef RTEMS_PROFILING
        cmp     r2, #0
        bne     profiling_exit_done
        bl      _CPU_Counter_read
        pop     {r1, r3}
        mov     r2, r0
        mov     r0, SELF_CPU_CONTROL
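        /*
         * Report the outermost interrupt to the profiling support: r0 is the
         * per-CPU control, r1 the entry instant popped from the stack, and r2
         * the exit instant just read.  The second popped word (r3) was only
         * the alignment filler; r3 is reloaded below since the call may
         * clobber it.
         */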
        bl      _Profiling_Outer_most_interrupt_entry_and_exit
        ldr     r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
profiling_exit_done:
#endif

        /* Restore stack pointer */
        mov     sp, SP_OF_INTERRUPTED_CONTEXT

        /* Check thread dispatch disable level */
        cmp     r3, #0
        bne     thread_dispatch_done

        /* Check context switch necessary */
        ldrb    r1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
        cmp     r1, #0
        beq     thread_dispatch_done

        /* This aligns thread_dispatch_done on a 4 byte boundary */
#ifdef __thumb__
        nop
#endif /* __thumb__ */

        /* Thread dispatch */
        bl      _Thread_Dispatch

thread_dispatch_done:

        /* Switch to ARM instructions if necessary */
        SWITCH_FROM_THUMB_TO_ARM

#ifdef ARM_MULTILIB_VFP
        /* Restore VFP context */
        ldmia   sp!, {r0, r1}
#ifdef ARM_MULTILIB_VFP_D32
        vldmia  sp!, {d16-d31}
#endif
        vldmia  sp!, {d0-d7}
        vmsr    FPSCR, r0
#endif /* ARM_MULTILIB_VFP */

        /* Restore SP_OF_INTERRUPTED_CONTEXT register and link register */
        ldmia   sp!, {SP_OF_INTERRUPTED_CONTEXT, lr}

        /*
         * XXX: Remember and restore the stack pointer.  The data on the stack
         * is still in use, so the stack is now in an inconsistent state.  The
         * FIQ handler implementation must not use this area.
         */
        mov     r0, sp
        add     sp, #CONTEXT_SIZE

        /* Get INT mode program status register */
        mrs     r1, CPSR
        bic     r1, r1, #0x1

        /* Switch to INT mode */
        msr     CPSR_c, r1

        /* Save EXCHANGE_LR and EXCHANGE_SPSR registers to exchange area */
        stmdb   sp!, {EXCHANGE_LR, EXCHANGE_SPSR}

        /* Restore context */
        ldmia   r0, CONTEXT_LIST
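        /*
         * Besides r0-r3, r7 and r12 this reloads EXCHANGE_LR and
         * EXCHANGE_SPSR (r4, r5) with the return address and SPSR saved on
         * the SVC stack in the prologue.  The original r4 and r5 of the
         * interrupted context sit in the exchange area and are restored
         * below.
         */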

        /* Set return address and program status */
        mov     lr, EXCHANGE_LR
        msr     SPSR_fsxc, EXCHANGE_SPSR

        /* Restore EXCHANGE_LR and EXCHANGE_SPSR registers from exchange area */
        ldmia   sp!, {EXCHANGE_LR, EXCHANGE_SPSR}

#ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
        /*
         * We must clear reservations here, since otherwise compare-and-swap
         * atomic operations with interrupts enabled may yield wrong results.
         * A compare-and-swap atomic operation is generated by the compiler
         * like this:
         *
         *   .L1:
         *     ldrex r1, [r0]
         *     cmp   r1, r3
         *     bne   .L2
         *     strex r3, r2, [r0]
         *     cmp   r3, #0
         *     bne   .L1
         *   .L2:
         *
         * Consider the following scenario.  A thread is interrupted right
         * before the strex.  The interrupt updates the value using a
         * compare-and-swap sequence.  Everything is fine up to this point.
         * The interrupt now performs a compare-and-swap sequence which fails
         * with a branch to .L2.  The current processor now has a reservation.
         * The interrupt returns without a further strex.  The thread updates
         * the value using the unrelated reservation of the interrupt.
         */
        clrex
#endif

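        /*
         * For an IRQ exception the link register holds the address of the
         * interrupted instruction plus 4, so subtracting 4 resumes execution
         * exactly at the point of interruption.  The S-suffixed write to the
         * program counter also copies SPSR back into CPSR, restoring the
         * interrupted mode and interrupt mask.
         */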
        /* Return from interrupt */
        subs    pc, lr, #4

#endif /* ARM_MULTILIB_ARCH_V4 */