1 | /* irq.c |
---|
2 | * |
---|
3 | * This file contains the implementation of the function described in irq.h |
---|
4 | * |
---|
5 | * Copyright (C) 1998 valette@crf.canon.fr |
---|
6 | * |
---|
7 | * COPYRIGHT (c) 1989-2011. |
---|
8 | * On-Line Applications Research Corporation (OAR). |
---|
9 | * |
---|
10 | * The license and distribution terms for this file may be |
---|
11 | * found in the file LICENSE in this distribution or at |
---|
12 | * http://www.rtems.com/license/LICENSE. |
---|
13 | * |
---|
14 | * $Id$ |
---|
15 | */ |
---|
16 | |
---|
17 | #include <rtems/asm.h> |
---|
18 | #include <rtems/system.h> |
---|
19 | #include <bspopts.h> |
---|
20 | #include <bsp/irq_asm.h> |
---|
21 | #include <rtems/score/cpu.h> |
---|
22 | #include <rtems/score/percpu.h> |
---|
23 | |
---|
24 | #ifndef CPU_STACK_ALIGNMENT |
---|
25 | #error "Missing header? CPU_STACK_ALIGNMENT is not defined here" |
---|
26 | #endif |
---|
27 | |
---|
/*
 * Layout of the intermediate stack frame that _ISR_Handler establishes
 * below the interrupted context.  Offsets are relative to the aligned
 * stack pointer; the restore path pops EBX/EBP/ESP in this order, so
 * ESP_OFF must stay directly above EBP_OFF.
 */
#define ARG_OFF	0                     /* vector number arg passed to C_dispatch_isr */
#define MSK_OFF 4                     /* saved i8259 interrupt mask                 */
#define EBX_OFF	8                     /* ebx */
#define EBP_OFF 12                    /* code restoring ebp/esp relies on */
#define ESP_OFF 16                    /* esp being on top of ebp!         */
#ifdef __SSE__
/* need to be on 16 byte boundary for SSE, add 12 to do that;
 * the extra 512 bytes hold the fxsave/fxrstor image at SSE_OFF
 */
#define FRM_SIZ (20+12+512)
#define SSE_OFF 32
#else
#define FRM_SIZ 20
#endif
---|
41 | |
---|
42 | BEGIN_CODE |
---|
43 | |
---|
SYM (_ISR_Handler):
	/*
	 * Common interrupt dispatcher.  Before this point is reached, the
	 * vector's unique entry point (see DISTINCT_INTERRUPT_ENTRY below)
	 * did the following:
	 *
	 *   1. saved the scratch registers eax, edx, ecx
	 *   2. put the vector number in ecx
	 *
	 * NOTE: If the previous values of the segment registers are
	 *       pushed, do not forget to adjust SAVED_REGS.
	 *
	 * NOTE: Make sure the exit code restores these registers
	 *       when this type of code is needed.
	 */

	/***** ESTABLISH SEGMENTS CODE GOES HERE ******/

	/*
	 * END OF ESTABLISH SEGMENTS
	 */

	/*
	 * Establish an aligned stack frame:
	 *    original sp
	 *    saved ebx
	 *    saved ebp
	 *    saved irq mask
	 *    vector arg to C_dispatch_isr   <- aligned SP
	 */
	movl      esp, eax
	subl      $FRM_SIZ, esp
	andl      $ - CPU_STACK_ALIGNMENT, esp
	movl      ebx, EBX_OFF(esp)
	movl      eax, ESP_OFF(esp)
	movl      ebp, EBP_OFF(esp)

#ifdef __SSE__
	/* NOTE: SSE only is supported if the BSP enables fxsave/fxrstor
	 * to save/restore SSE context! This is so far only implemented
	 * for pc386!.
	 */

	/* We save SSE here (on the task stack) because we possibly
	 * call other C-code (besides the ISR, namely _Thread_Dispatch())
	 */
	/* don't wait here; a possible exception condition will eventually be
	 * detected when the task resumes control and executes a FP instruction
	fwait
	 */
	fxsave    SSE_OFF(esp)
	fninit                            /* clean-slate FPU               */
	movl      $0x1f80, ARG_OFF(esp)   /* use ARG_OFF as scratch space  */
	ldmxcsr   ARG_OFF(esp)            /* clean-slate MXCSR             */
#endif

.check_stack_switch:
	movl      esp, ebp                /* ebp = previous stack pointer  */

#if defined(RTEMS_SMP) && defined(BSP_HAS_SMP)
	/* Locate this CPU's per-CPU control block. */
	movl      $SYM(_Per_CPU_Information_p), ebx
	call      SYM(bsp_smp_processor_id)
	mov       (ebx,eax,4), ebx        /* ebx = _Per_CPU_Information_p[cpu] */
	pushl     ecx                     /* vector number is in a scratch reg */
	call      SYM(_ISR_SMP_Enter)
	popl      ecx
	cmpl      $0, eax                 /* nonzero nest level?           */
	jne       .i8259                  /* yes: already on interrupt stack */
	movl      PER_CPU_INTERRUPT_STACK_HIGH(ebx), esp

#else
	movl      $SYM(_Per_CPU_Information), ebx

	/*
	 * Is this the outermost interrupt?
	 * Switch stacks if necessary
	 */
	cmpl      $0, PER_CPU_ISR_NEST_LEVEL(ebx)
	jne       nested                  /* No, then continue             */
	movl      PER_CPU_INTERRUPT_STACK_HIGH(ebx), esp

	/*
	 * We want to ensure that the old stack pointer is in ebp.
	 * By saving it on every interrupt, all we have to do is
	 * movl ebp->esp near the end of every interrupt.
	 */

nested:
	incl      PER_CPU_ISR_NEST_LEVEL(ebx)          /* one nest level deeper */
	incl      SYM (_Thread_Dispatch_disable_level) /* disable multitasking  */
#endif

	/*
	 * i8259 Management
	 */
.i8259:
	/* Do not disable any 8259 interrupts if this isn't from one.
	 * FIX: the immediate needs a '$' prefix in AT&T syntax; a bare
	 * "16" is an absolute memory operand, not the constant 16.
	 */
	cmpl      $16, ecx                /* is this a PIC IRQ (0..15)?    */
	jge       .end_of_i8259           /* no: skip all PIC handling     */

	/*
	 * acknowledge the interrupt
	 */
	movw      SYM (i8259s_cache), ax  /* save current i8259 interrupt mask */
	movl      eax, MSK_OFF(esp)       /* save in stack frame           */

	/*
	 * compute the new PIC mask:
	 *
	 * <new mask> = <old mask> | irq_mask_or_tbl[<intr number aka ecx>]
	 */
	movw      SYM (irq_mask_or_tbl) (,ecx,2), dx
	orw       dx, ax
	/*
	 * Install new computed value on the i8259 and update cache
	 * accordingly
	 */
	movw      ax, SYM (i8259s_cache)
	outb      $PIC_MASTER_IMR_IO_PORT
	movb      ah, al                  /* high byte = slave PIC mask    */
	outb      $PIC_SLAVE_IMR_IO_PORT

	/* Issue the end-of-interrupt command; IRQs above 7 are cascaded,
	 * so they need an EOI on the slave AND the master.
	 */
	movb      $PIC_EOI, al
	cmpl      $7, ecx
	jbe       .master
	outb      $PIC_SLAVE_COMMAND_IO_PORT
.master:
	outb      $PIC_MASTER_COMMAND_IO_PORT
.end_of_i8259:

	/*
	 * re-enable interrupts at processor level as the current
	 * interrupt source is now masked via i8259
	 */
	sti

	/*
	 * ECX is preloaded with the vector number; store as arg
	 * on top of stack. Note that _CPU_Interrupt_stack_high
	 * was adjusted in _CPU_Interrupt_stack_setup() (score/rtems/cpu.h)
	 * to make sure there is space.
	 */
	movl      ecx, ARG_OFF(esp)       /* store vector arg in stack     */
	call      C_dispatch_isr

	/*
	 * disable interrupts again
	 */
	cli

	movl      ARG_OFF(esp), ecx       /* grab vector arg from stack    */

	/*
	 * Restore stack. This moves back to the task stack
	 * when all interrupts are unnested.
	 */
	movl      ebp, esp

	/*
	 * restore the original i8259 masks
	 */
	/* Do not touch 8259 interrupts if this isn't from one.
	 * FIX: same missing-'$' immediate as in the .i8259 check above.
	 */
	cmpl      $16, ecx                /* is this a PIC IRQ (0..15)?    */
	jge       .dont_restore_i8259

	movl      MSK_OFF(esp), eax       /* mask saved on entry           */
	movw      ax, SYM (i8259s_cache)
	outb      $PIC_MASTER_IMR_IO_PORT
	movb      ah, al
	outb      $PIC_SLAVE_IMR_IO_PORT
.dont_restore_i8259:

#if defined(RTEMS_SMP) && defined(BSP_HAS_SMP)
	call      SYM(_ISR_SMP_Exit)
	testl     eax, eax                /* dispatch needed?              */
	je        .exit
#else
	decl      PER_CPU_ISR_NEST_LEVEL(ebx)  /* one less ISR nest level  */
	                                  /* If interrupts are nested,      */
	                                  /*   then dispatching is disabled */

	decl      SYM (_Thread_Dispatch_disable_level)
	                                  /* unnest multitasking           */
	                                  /* Is dispatch disabled (ZF from decl) */
	jne       .exit                   /* Yes, then exit                */

	cmpb      $0, PER_CPU_DISPATCH_NEEDED(ebx)
	                                  /* Is task switch necessary?     */
	jne       .schedule               /* Yes, then call the scheduler  */
	jmp       .exit                   /* No, exit                      */
#endif

.schedule:
	/*
	 * the scratch registers have already been saved and we are already
	 * back on the thread system stack. So we can call _Thread_Dispatch
	 * directly
	 */
	call      _Thread_Dispatch
	/*
	 * fall through exit to restore complete context (scratch registers,
	 * eip, CS, Flags).
	 */
.exit:

#ifdef __SSE__
	fwait
	fxrstor   SSE_OFF(esp)
#endif

	/* restore ebx, ebp and original esp */
	addl      $EBX_OFF, esp
	popl      ebx
	popl      ebp
	popl      esp

	/*
	 * BEGINNING OF DE-ESTABLISH SEGMENTS
	 *
	 * NOTE: Make sure there is code here if code is added to
	 *       load the segment registers.
	 *
	 */

	/******* DE-ESTABLISH SEGMENTS CODE GOES HERE ********/

	/*
	 * END OF DE-ESTABLISH SEGMENTS
	 */

	/* restore the scratch registers saved by the vector prologue */
	popl      edx
	popl      ecx
	popl      eax
	iret
---|
277 | |
---|
/*
 * DISTINCT_INTERRUPT_ENTRY(_vector)
 *
 * Generate the unique entry point "rtems_irq_prologue_<_vector>": it
 * saves the scratch registers (eax, ecx, edx), loads the vector number
 * into ecx and jumps to the common _ISR_Handler, which expects exactly
 * this register/stack layout on entry (see the NOTE at its top).
 */
#define DISTINCT_INTERRUPT_ENTRY(_vector) \
	.p2align 4 ; \
	PUBLIC (rtems_irq_prologue_ ## _vector ) ; \
SYM (rtems_irq_prologue_ ## _vector ): \
	pushl	eax ; \
	pushl	ecx ; \
	pushl	edx ; \
	movl	$ _vector, ecx ; \
	jmp	SYM (_ISR_Handler) ;

/* One prologue per vector 0..16 (the 16 PIC lines plus one extra). */
DISTINCT_INTERRUPT_ENTRY(0)
DISTINCT_INTERRUPT_ENTRY(1)
DISTINCT_INTERRUPT_ENTRY(2)
DISTINCT_INTERRUPT_ENTRY(3)
DISTINCT_INTERRUPT_ENTRY(4)
DISTINCT_INTERRUPT_ENTRY(5)
DISTINCT_INTERRUPT_ENTRY(6)
DISTINCT_INTERRUPT_ENTRY(7)
DISTINCT_INTERRUPT_ENTRY(8)
DISTINCT_INTERRUPT_ENTRY(9)
DISTINCT_INTERRUPT_ENTRY(10)
DISTINCT_INTERRUPT_ENTRY(11)
DISTINCT_INTERRUPT_ENTRY(12)
DISTINCT_INTERRUPT_ENTRY(13)
DISTINCT_INTERRUPT_ENTRY(14)
DISTINCT_INTERRUPT_ENTRY(15)
DISTINCT_INTERRUPT_ENTRY(16)
---|
305 | |
---|
/*
 * routine used to initialize the IDT by default
 */

PUBLIC (default_raw_idt_handler)
PUBLIC (raw_idt_notify)

/*
 * Default handler installed in otherwise-unused IDT entries: saves all
 * general-purpose registers, aligns the stack for the C calling
 * convention, calls raw_idt_notify() and returns from the interrupt.
 */
SYM (default_raw_idt_handler):
	pusha                             /* save all general registers        */
	cld                               /* C code expects DF clear           */
	mov	esp, ebp                  /* remember the unaligned esp in ebp */
	andl	$ - CPU_STACK_ALIGNMENT, esp /* align the stack for the call   */
	call	raw_idt_notify
	mov	ebp, esp                  /* undo the alignment                */
	popa
	iret
---|
322 | |
---|
323 | END_CODE |
---|
324 | |
---|
325 | END |
---|