source: rtems/bsps/i386/shared/irq/irq_asm.S @ 5d4a1edc

5
Last change on this file since 5d4a1edc was 5d4a1edc, checked in by Jan Sommer <jan.sommer@…>, on May 31, 2020 at 2:22:55 PM

bsp/pc386: Define interrupt stack frame for smp

  • Defines CPU_Interrupt_frame in cpu_impl.h
  • Updates irq_asm.S to save/restore registers in matching order to interrupt frame

  • Property mode set to 100644
File size: 7.6 KB
Line 
1/*
2 *  This file contains the implementation of the function described in irq.h
3 */
4
5/*
6 *  Copyright (C) 1998 valette@crf.canon.fr
7 *
8 *  COPYRIGHT (c) 1989-2011.
9 *  On-Line Applications Research Corporation (OAR).
10 *
11 *  The license and distribution terms for this file may be
12 *  found in the file LICENSE in this distribution or at
13 *  http://www.rtems.org/license/LICENSE.
14 */
15
16#include <rtems/asm.h>
17#include <bspopts.h>
18#include <rtems/score/cpu.h>
19#include <rtems/score/percpu.h>
20
21#include <bsp.h> /* to establish dependency on prototype */
22
23#ifndef CPU_STACK_ALIGNMENT
24#error "Missing header? CPU_STACK_ALIGNMENT is not defined here"
25#endif
26
27/* Stack frame we use for intermediate storage               */
28#define ARG_OFF  0
29#define EBX_OFF  4        /* ebx                              */
30#define EBP_OFF  8       /* code restoring ebp/esp relies on */
31#define ESP_OFF 12       /* esp being on top of ebp!         */
32#ifdef __SSE__
33#ifdef RTEMS_SMP
34#error SMP with SSE support has not been tested. Use at your own risk.
35#endif
36/* need to be on 16 byte boundary for SSE, add 12 to do that */
37#define FRM_SIZ (20+12+512)
38#define SSE_OFF 32
39#else
40#define FRM_SIZ 16
41#endif
42
43        BEGIN_CODE
44
SYM (_ISR_Handler):
        /*
         *  Common interrupt dispatcher.  Before this point is reached,
         *  the vector's unique entry point (rtems_irq_prologue_<n>,
         *  generated by DISTINCT_INTERRUPT_ENTRY below) did the
         *  following:
         *
         *     1. saved the scratch registers eax, ecx and edx
         *     2. put the vector number in ecx.
         *
         * BEGINNING OF ESTABLISH SEGMENTS
         *
         *  WARNING: If an interrupt can occur when the segments are
         *           not correct, then this is where we should establish
         *           the segments.  In addition to establishing the
         *           segments, it may be necessary to establish a stack
         *           in the current data area on the outermost interrupt.
         *
         *  NOTE:  If the previous values of the segment registers are
         *         pushed, do not forget to adjust SAVED_REGS.
         *
         *  NOTE:  Make sure the .Lthread_dispatch_done code restores these
         *         when this type of code is needed.
         */

        /***** ESTABLISH SEGMENTS CODE GOES HERE  ******/

        /*
         * END OF ESTABLISH SEGMENTS
         */

        /*
         * Establish an intermediate stack frame of FRM_SIZ bytes; the
         * layout matches the ARG_OFF/EBX_OFF/EBP_OFF/ESP_OFF offsets
         * defined at the top of this file:
         *   original sp                      (ESP_OFF)
         *   saved ebp                        (EBP_OFF)
         *   saved ebx                        (EBX_OFF)
         *   vector arg to BSP_dispatch_isr   (ARG_OFF) <- aligned SP
         */
        movl      esp, eax
        subl      $FRM_SIZ, esp
        movl      eax, ESP_OFF(esp)
        movl      ebp, EBP_OFF(esp)
        movl      ebx, EBX_OFF(esp)

        /*
         * GCC versions starting with 4.3 no longer place the cld
         * instruction before string operations.  We need to ensure
         * the direction flag is cleared for ISR handlers.
         */
        cld

#ifdef __SSE__
        /* NOTE: SSE only is supported if the BSP enables fxsave/fxrstor
         * to save/restore SSE context! This is so far only implemented
         * for pc386!.
         */

        /* We save SSE here (on the task stack) because we possibly
         * call other C-code (besides the ISR, namely _Thread_Dispatch())
         */
        /*  don't wait here; a possible exception condition will eventually be
         *  detected when the task resumes control and executes a FP instruction
        fwait
         */
        fxsave SSE_OFF(esp)             /* save full FPU/SSE state in frame */
        fninit                          /* clean-slate FPU                */
        movl   $0x1f80, ARG_OFF(esp)    /* use ARG_OFF as scratch space   */
        ldmxcsr ARG_OFF(esp)            /* clean-slate MXCSR              */
#endif

        /*
         *  Now switch to the per-CPU interrupt stack if this is the
         *  outermost interrupt.
         */

PUBLIC (ISR_STOP)
ISR_STOP:
.check_stack_switch:
        movl      esp, ebp                  /* ebp = previous stack pointer */
        andl      $ - CPU_STACK_ALIGNMENT, esp  /* Make sure esp is 16 byte aligned */

        GET_SELF_CPU_CONTROL ebx            /* ebx = per-CPU control block  */

        /* is this the outermost interrupt? */
        cmpl      $0, PER_CPU_ISR_NEST_LEVEL(ebx)
        jne       nested                    /* No, stay on current stack    */
        movl      PER_CPU_INTERRUPT_STACK_HIGH(ebx), esp

        /*
         *  We want to ensure that the old stack pointer is in ebp.
         *  By saving it on every interrupt, all we have to do is
         *  movl ebp->esp near the end of every interrupt.
         */

nested:
        incl      PER_CPU_ISR_NEST_LEVEL(ebx)  /* one nest level deeper */
        incl      PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(ebx) /* disable
                                                                multitasking */
        /*
         *  ECX is preloaded with the vector number; store as arg
         *  on top of stack. Note that _CPU_Interrupt_stack_high
         *  was adjusted in _CPU_Interrupt_stack_setup() (score/rtems/cpu.h)
         *  to make sure there is space.
         */

        movl      ecx, ARG_OFF(esp)  /* store vector arg in stack */
        call      BSP_dispatch_isr

        movl      ARG_OFF(esp), ecx     /* grab vector arg from stack */

        /*
         * Restore stack. This moves back to the task stack
         * when all interrupts are unnested.
         */
        movl      ebp, esp

        /*
         * Thread dispatching is necessary and allowed if and only if
         *   dispatch_necessary == 1 and
         *   isr_dispatch_disable == 0 and
         *   thread_dispatch_disable_level == 0.
         *
         * Otherwise, continue with .Lthread_dispatch_done.
         *
         * The xorl/orl sequence below computes eax == 0 exactly when
         * all three conditions hold, without branching per condition.
         */
        movl      PER_CPU_DISPATCH_NEEDED(ebx), eax
        xorl      PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(ebx), eax
        decl      PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(ebx)
        orl       PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(ebx), eax
        orl       PER_CPU_ISR_DISPATCH_DISABLE(ebx), eax
        decl      PER_CPU_ISR_NEST_LEVEL(ebx)  /* one less ISR nest level */

        cmpl      $0, eax
        jne       .Lthread_dispatch_done    /* Is task switch necessary? */

.Ldo_thread_dispatch:
          /* Set ISR dispatch disable and thread dispatch disable level to one */
          movl    $1, PER_CPU_ISR_DISPATCH_DISABLE(ebx)
          movl    $1, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(ebx)
          /* Call _Thread_Do_dispatch(); this function will enable interrupts */
          push    $EFLAGS_INTR_ENABLE      /* Set interrupt flag manually */
          push    ebx
          call    _Thread_Do_dispatch

      /* Disable interrupts */
          cli
          addl    $8, esp
          /* After returning from _Thread_Do_dispatch the thread may have
           * migrated, so the current CPU and the ebx ptr can differ;
           * reload the per-CPU control block. */
          GET_SELF_CPU_CONTROL ebx
          cmpb    $0, PER_CPU_DISPATCH_NEEDED(ebx)
          jne     .Ldo_thread_dispatch

          /* We are done with thread dispatching */
          movl    $0, PER_CPU_ISR_DISPATCH_DISABLE(ebx)
         /*
          * fall through .Lthread_dispatch_done to restore complete context
          * (scratch registers, eip, CS, Flags).
          */
.Lthread_dispatch_done:

#ifdef __SSE__
        fwait
        fxrstor   SSE_OFF(esp)          /* restore FPU/SSE state saved above */
#endif

        /* restore ebx, ebp and original esp (frame layout: see above) */
        addl      $EBX_OFF, esp
        popl      ebx
        popl      ebp
        popl      esp

        /*
         * BEGINNING OF DE-ESTABLISH SEGMENTS
         *
         *  NOTE:  Make sure there is code here if code is added to
         *         load the segment registers.
         *
         */

        /******* DE-ESTABLISH SEGMENTS CODE GOES HERE ********/

        /*
         * END OF DE-ESTABLISH SEGMENTS
         */

        /* restore the scratch registers pushed by the vector prologue */
        popl      edx
        popl      ecx
        popl      eax
        iret
229
/*
 * Generate the unique, public entry point for interrupt vector _vector:
 * it saves the scratch registers (eax, ecx, edx — exactly the set and
 * order _ISR_Handler expects to pop before iret), loads the vector
 * number into ecx and jumps to the common handler.
 */
#define DISTINCT_INTERRUPT_ENTRY(_vector) \
        .p2align 4                         ; \
        PUBLIC (rtems_irq_prologue_ ## _vector ) ; \
SYM (rtems_irq_prologue_ ## _vector ):             \
        pushl     eax                ; \
        pushl     ecx                ; \
        pushl     edx                ; \
        movl      $ _vector, ecx     ; \
        jmp       SYM (_ISR_Handler) ;
239
/* Instantiate the distinct entry points for interrupt vectors 0..16 */
DISTINCT_INTERRUPT_ENTRY(0)
DISTINCT_INTERRUPT_ENTRY(1)
DISTINCT_INTERRUPT_ENTRY(2)
DISTINCT_INTERRUPT_ENTRY(3)
DISTINCT_INTERRUPT_ENTRY(4)
DISTINCT_INTERRUPT_ENTRY(5)
DISTINCT_INTERRUPT_ENTRY(6)
DISTINCT_INTERRUPT_ENTRY(7)
DISTINCT_INTERRUPT_ENTRY(8)
DISTINCT_INTERRUPT_ENTRY(9)
DISTINCT_INTERRUPT_ENTRY(10)
DISTINCT_INTERRUPT_ENTRY(11)
DISTINCT_INTERRUPT_ENTRY(12)
DISTINCT_INTERRUPT_ENTRY(13)
DISTINCT_INTERRUPT_ENTRY(14)
DISTINCT_INTERRUPT_ENTRY(15)
DISTINCT_INTERRUPT_ENTRY(16)
257
        /*
         * Default handler used to initialize the IDT entries: it saves
         * all general-purpose registers, aligns the stack for the C
         * call, notifies raw_idt_notify() and returns from the
         * interrupt unchanged.
         */

PUBLIC (default_raw_idt_handler)
PUBLIC (raw_idt_notify)

SYM (default_raw_idt_handler):
        pusha                           /* save all general registers       */
        cld                             /* clear direction flag for C code  */
        mov       esp, ebp              /* ebp = sp before alignment        */
        andl     $ - CPU_STACK_ALIGNMENT, esp  /* align stack for the call  */
        call      raw_idt_notify
        mov       ebp, esp              /* restore pre-alignment sp         */
        popa
        iret
274
275END_CODE
276
277END
Note: See TracBrowser for help on using the repository browser.