source: rtems/c/src/lib/libbsp/i386/shared/irq/irq_asm.S @ e94aa61b

Last change on this file was e94aa61b, checked in by Till Straumann <strauman@…> on 08/05/11 at 00:15:50

2011-08-04 Till Straumann <strauman@…>

  • shared/irq/irq_asm.S: BUGFIX (introduced by SMP changes which moved code around, apparently): *must* store i8259 mask to frame *before* switching to IRQ stack. The code retrieves the mask after switching back to original stack. Also, the IRQ stack has no reserved space for the mask; storing it there could overwrite memory!
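
As a rough illustration of the ordering constraint described above, here is a minimal sketch in C (not the committed code; the assembler listing below is authoritative). The names struct isr_frame and isr_prologue_save_mask are hypothetical, and the exact C type of the real BSP symbol i8259s_cache is assumed:

#include <stdint.h>

/* Mirrors the ARG_OFF/MSK_OFF/EBX_OFF/EBP_OFF/ESP_OFF layout used below. */
struct isr_frame {
  uint32_t arg;   /* ARG_OFF: vector number passed to C_dispatch_isr */
  uint32_t msk;   /* MSK_OFF: saved i8259 interrupt mask             */
  uint32_t ebx;   /* EBX_OFF                                         */
  uint32_t ebp;   /* EBP_OFF                                         */
  uint32_t esp;   /* ESP_OFF: original stack pointer                 */
};

extern volatile uint16_t i8259s_cache;  /* current PIC mask (type assumed here) */

/* Hypothetical helper: the mask must be captured while the frame on the
 * interrupted stack is still the active stack, because the exit path reads
 * frame->msk only after esp has been switched back to that stack, and the
 * IRQ stack has no slot reserved for the mask.
 */
static inline void isr_prologue_save_mask(struct isr_frame *frame)
{
  frame->msk = i8259s_cache;  /* step 1: store the mask in the frame         */
  /* step 2: only now may the handler move esp to the interrupt stack.       */
}
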
File size: 8.4 KB
/* irq.c
 *
 *  This file contains the implementation of the function described in irq.h
 *
 *  Copyright (C) 1998 valette@crf.canon.fr
 *
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#include <rtems/asm.h>
#include <rtems/system.h>
#include <bspopts.h>
#include <bsp/irq_asm.h>
#include <rtems/score/cpu.h>
#include <rtems/score/percpu.h>

#ifndef CPU_STACK_ALIGNMENT
#error "Missing header? CPU_STACK_ALIGNMENT is not defined here"
#endif

/* Stack frame we use for intermediate storage               */
#define ARG_OFF 0
#define MSK_OFF 4
#define EBX_OFF 8        /* ebx                              */
#define EBP_OFF 12       /* code restoring ebp/esp relies on */
#define ESP_OFF 16       /* esp being on top of ebp!         */
#ifdef __SSE__
/* need to be on 16 byte boundary for SSE, add 12 to do that */
#define FRM_SIZ (20+12+512)
#define SSE_OFF 32
#else
#define FRM_SIZ 20
#endif

        BEGIN_CODE

SYM (_ISR_Handler):
        /*
         *  Before this point is reached the vector's unique
         *  entry point did the following:
         *
         *     1. saved scratch registers eax edx ecx
         *     2. put the vector number in ecx.
         *
         *  NOTE:  If the previous values of the segment registers are
         *         pushed, do not forget to adjust SAVED_REGS.
         *
         *  NOTE:  Make sure the exit code restores these registers
         *         when this type of code is needed.
         */

        /***** ESTABLISH SEGMENTS CODE GOES HERE  ******/

        /*
         * END OF ESTABLISH SEGMENTS
         */

        /*
         * Establish an aligned stack frame
         *   original sp
         *   saved ebx
         *   saved ebp
         *   saved irq mask
         *   vector arg to C_dispatch_isr   <- aligned SP
         */
        movl      esp, eax
        subl      $FRM_SIZ, esp
        andl      $ - CPU_STACK_ALIGNMENT, esp
        movl      ebx, EBX_OFF(esp)
        movl      eax, ESP_OFF(esp)
        movl      ebp, EBP_OFF(esp)
        movw      SYM (i8259s_cache), ax /* save current i8259 interrupt mask */
        movl      eax, MSK_OFF(esp)      /* save in stack frame */
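        /*
         * NOTE: the mask must be stored into this frame while esp still
         *       points into the interrupted stack; the exit path reads
         *       MSK_OFF(esp) only after esp has been switched back, and
         *       the IRQ stack reserves no slot for it (see the 2011-08-04
         *       change log entry above).
         */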

#ifdef __SSE__
        /* NOTE: SSE is only supported if the BSP enables fxsave/fxrstor
         * to save/restore SSE context! This is so far only implemented
         * for pc386.
         */

        /* We save SSE here (on the task stack) because we possibly
         * call other C code (besides the ISR, namely _Thread_Dispatch())
         */
    /*  don't wait here; a possible exception condition will eventually be
     *  detected when the task resumes control and executes a FP instruction
        fwait
     */
        fxsave SSE_OFF(esp)
        fninit                          /* clean-slate FPU                */
        movl   $0x1f80, ARG_OFF(esp)    /* use ARG_OFF as scratch space   */
        ldmxcsr ARG_OFF(esp)            /* clean-slate MXCSR              */
#endif

.check_stack_switch:
        movl      esp, ebp                  /* ebp = previous stack pointer */
#if defined(RTEMS_SMP) && defined(BSP_HAS_SMP)
        movl     $SYM(_Per_CPU_Information_p), ebx
        call     SYM(bsp_smp_processor_id)
        mov      (ebx,eax,4), ebx
        pushl    ecx
        call     SYM(_ISR_SMP_Enter)
        popl     ecx
        cmpl     $0, eax
        jne      .i8259
        movl     PER_CPU_INTERRUPT_STACK_HIGH(ebx), esp

#else
        movl     $SYM(_Per_CPU_Information), ebx

        /*
         *  Is this the outermost interrupt?
         *  Switch stacks if necessary
         */
        cmpl      $0, PER_CPU_ISR_NEST_LEVEL(ebx)
        jne       nested                    /* No, then continue */
        movl      PER_CPU_INTERRUPT_STACK_HIGH(ebx), esp

        /*
         *  We want to ensure that the old stack pointer is in ebp.
         *  By saving it on every interrupt, all we have to do is
         *  movl ebp->esp near the end of every interrupt.
         */

nested:
        incl      PER_CPU_ISR_NEST_LEVEL(ebx)  /* one nest level deeper */
        incl      SYM (_Thread_Dispatch_disable_level) /* disable multitasking */
#endif
        /*
         *  i8259 Management
         */

.i8259:
        /* Do not disable any 8259 interrupts if this isn't from one */
        cmpl      $16, ecx              /* is this a PIC IRQ? */
        jge       .end_of_i8259

        /*
         * acknowledge the interrupt
         */
        movw      SYM (i8259s_cache), ax /* fetch current i8259 interrupt mask */

        /*
         * compute the new PIC mask:
         *
         * <new mask> = <old mask> | irq_mask_or_tbl[<intr number aka ecx>]
         */
        movw      SYM (irq_mask_or_tbl) (,ecx,2), dx
        orw       dx, ax
        /*
         * Install new computed value on the i8259 and update cache
         * accordingly
         */
        movw      ax, SYM (i8259s_cache)
        outb      $PIC_MASTER_IMR_IO_PORT
        movb      ah, al
        outb      $PIC_SLAVE_IMR_IO_PORT

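        /*
         * Send EOI: vectors above 7 come in through the slave PIC, so they
         * need an EOI on the slave followed by one on the cascaded master;
         * vectors 0-7 only need the master EOI.
         */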
        movb      $PIC_EOI, al
        cmpl      $7, ecx
        jbe      .master
        outb      $PIC_SLAVE_COMMAND_IO_PORT
.master:
        outb      $PIC_MASTER_COMMAND_IO_PORT
.end_of_i8259:

        /*
         * re-enable interrupts at processor level as the current
         * interrupt source is now masked via i8259
         */
        sti

        /*
         *  ECX is preloaded with the vector number; store as arg
         *  on top of stack. Note that _CPU_Interrupt_stack_high
         *  was adjusted in _CPU_Interrupt_stack_setup() (score/rtems/cpu.h)
         *  to make sure there is space.
         */

        movl      ecx, ARG_OFF(esp)  /* store vector arg in stack */
        call      C_dispatch_isr

        /*
         * disable interrupts again
         */
        cli

        movl      ARG_OFF(esp), ecx     /* grab vector arg from stack */

        /*
         * Restore stack. This moves back to the task stack
         * when all interrupts are unnested.
         */
        movl      ebp, esp

        /*
         * restore the original i8259 masks
         */
        /* Do not touch 8259 interrupts if this isn't from one */
        cmpl      $16, ecx              /* is this a PIC IRQ? */
        jge       .dont_restore_i8259

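        /*
         * esp was switched back to the original frame at "movl ebp, esp"
         * above, so MSK_OFF(esp) is the mask saved on entry; the IRQ stack
         * never held it (see the 2011-08-04 change log entry above).
         */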
        movl      MSK_OFF(esp), eax
        movw      ax, SYM (i8259s_cache)
        outb      $PIC_MASTER_IMR_IO_PORT
        movb      ah, al
        outb      $PIC_SLAVE_IMR_IO_PORT
.dont_restore_i8259:


#if defined(RTEMS_SMP) && defined(BSP_HAS_SMP)
        call    SYM(_ISR_SMP_Exit)
        testl   eax, eax
        je      .exit
#else
        decl      PER_CPU_ISR_NEST_LEVEL(ebx)  /* one less ISR nest level */
                                            /* If interrupts are nested, */
                                            /*   then dispatching is disabled */

        decl      SYM (_Thread_Dispatch_disable_level)
                                            /* unnest multitasking */
                                            /* Is dispatch disabled */
        jne       .exit                     /* Yes, then exit */

        cmpb      $0, PER_CPU_DISPATCH_NEEDED(ebx)
                                            /* Is task switch necessary? */
        jne       .schedule                 /* Yes, then call the scheduler */
        jmp       .exit                     /* No, exit */
#endif

.schedule:
        /*
         * the scratch registers have already been saved and we are already
         * back on the thread system stack. So we can call _Thread_Dispatch
         * directly
         */
        call      _Thread_Dispatch
        /*
         * fall through to exit to restore the complete context (scratch
         * registers, eip, CS, Flags).
         */
.exit:

#ifdef __SSE__
        fwait
        fxrstor   SSE_OFF(esp)
#endif

        /* restore ebx, ebp and original esp */
        addl      $EBX_OFF, esp
        popl      ebx
        popl      ebp
        popl      esp

        /*
         * BEGINNING OF DE-ESTABLISH SEGMENTS
         *
         *  NOTE:  Make sure there is code here if code is added to
         *         load the segment registers.
         *
         */

        /******* DE-ESTABLISH SEGMENTS CODE GOES HERE ********/

        /*
         * END OF DE-ESTABLISH SEGMENTS
         */
        popl      edx
        popl      ecx
        popl      eax
        iret

#define DISTINCT_INTERRUPT_ENTRY(_vector) \
        .p2align 4                         ; \
        PUBLIC (rtems_irq_prologue_ ## _vector ) ; \
SYM (rtems_irq_prologue_ ## _vector ):             \
        pushl     eax                ; \
        pushl     ecx                ; \
        pushl     edx                ; \
        movl      $ _vector, ecx     ; \
        jmp       SYM (_ISR_Handler) ;

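/*
 * Each DISTINCT_INTERRUPT_ENTRY() expansion below is one of the per-vector
 * unique entry points referred to at the top of _ISR_Handler: it saves the
 * scratch registers eax/ecx/edx, loads the vector number into ecx and
 * jumps to the common handler.
 */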
DISTINCT_INTERRUPT_ENTRY(0)
DISTINCT_INTERRUPT_ENTRY(1)
DISTINCT_INTERRUPT_ENTRY(2)
DISTINCT_INTERRUPT_ENTRY(3)
DISTINCT_INTERRUPT_ENTRY(4)
DISTINCT_INTERRUPT_ENTRY(5)
DISTINCT_INTERRUPT_ENTRY(6)
DISTINCT_INTERRUPT_ENTRY(7)
DISTINCT_INTERRUPT_ENTRY(8)
DISTINCT_INTERRUPT_ENTRY(9)
DISTINCT_INTERRUPT_ENTRY(10)
DISTINCT_INTERRUPT_ENTRY(11)
DISTINCT_INTERRUPT_ENTRY(12)
DISTINCT_INTERRUPT_ENTRY(13)
DISTINCT_INTERRUPT_ENTRY(14)
DISTINCT_INTERRUPT_ENTRY(15)
DISTINCT_INTERRUPT_ENTRY(16)

        /*
         * default handler installed when the IDT is initialized
         */

PUBLIC (default_raw_idt_handler)
PUBLIC (raw_idt_notify)

SYM (default_raw_idt_handler):
        pusha
        cld
        mov       esp, ebp
        andl     $ - CPU_STACK_ALIGNMENT, esp
        call      raw_idt_notify
        mov       ebp, esp
        popa
        iret

END_CODE

END