/*
 * This file contains the implementation of the function described in irq.h
 */

/*
 * Copyright (C) 1998 valette@crf.canon.fr
 *
 * COPYRIGHT (c) 1989-2011.
 * On-Line Applications Research Corporation (OAR).
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */
---|
15 | |
---|
16 | #include <rtems/asm.h> |
---|
17 | #include <rtems/system.h> |
---|
18 | #include <bspopts.h> |
---|
19 | #include <rtems/score/cpu.h> |
---|
20 | #include <rtems/score/percpu.h> |
---|
21 | |
---|
22 | #include <bsp.h> /* to establish dependency on prototype */ |
---|
23 | |
---|
24 | #ifndef CPU_STACK_ALIGNMENT |
---|
25 | #error "Missing header? CPU_STACK_ALIGNMENT is not defined here" |
---|
26 | #endif |
---|
27 | |
---|
28 | /* Stack frame we use for intermediate storage */ |
---|
29 | #define ARG_OFF 0 |
---|
30 | #define MSK_OFF 4 /* not used any more */ |
---|
31 | #define EBX_OFF 8 /* ebx */ |
---|
32 | #define EBP_OFF 12 /* code restoring ebp/esp relies on */ |
---|
33 | #define ESP_OFF 16 /* esp being on top of ebp! */ |
---|
34 | #ifdef __SSE__ |
---|
35 | /* need to be on 16 byte boundary for SSE, add 12 to do that */ |
---|
36 | #define FRM_SIZ (20+12+512) |
---|
37 | #define SSE_OFF 32 |
---|
38 | #else |
---|
39 | #define FRM_SIZ 20 |
---|
40 | #endif |
---|
41 | |
---|
42 | BEGIN_CODE |
---|
43 | |
---|
44 | SYM (_ISR_Handler): |
---|
45 | /* |
---|
46 | * Before this was point is reached the vectors unique |
---|
47 | * entry point did the following: |
---|
48 | * |
---|
49 | * 1. saved scratch registers registers eax edx ecx" |
---|
50 | * 2. put the vector number in ecx. |
---|
51 | * |
---|
52 | * BEGINNING OF ESTABLISH SEGMENTS |
---|
53 | * |
---|
54 | * WARNING: If an interrupt can occur when the segments are |
---|
55 | * not correct, then this is where we should establish |
---|
56 | * the segments. In addition to establishing the |
---|
57 | * segments, it may be necessary to establish a stack |
---|
58 | * in the current data area on the outermost interrupt. |
---|
59 | * |
---|
60 | * NOTE: If the previous values of the segment registers are |
---|
61 | * pushed, do not forget to adjust SAVED_REGS. |
---|
62 | * |
---|
63 | * NOTE: Make sure the exit code which restores these |
---|
64 | * when this type of code is needed. |
---|
65 | */ |
---|
66 | |
---|
67 | /***** ESTABLISH SEGMENTS CODE GOES HERE ******/ |
---|
68 | |
---|
69 | /* |
---|
70 | * END OF ESTABLISH SEGMENTS |
---|
71 | */ |
---|
72 | |
---|
73 | /* |
---|
74 | * Establish an aligned stack frame |
---|
75 | * original sp |
---|
76 | * saved ebx |
---|
77 | * saved ebp |
---|
78 | * saved irq mask |
---|
79 | * vector arg to BSP_dispatch_isr <- aligned SP |
---|
80 | */ |
---|
81 | movl esp, eax |
---|
82 | subl $FRM_SIZ, esp |
---|
83 | andl $ - CPU_STACK_ALIGNMENT, esp |
---|
84 | movl ebx, EBX_OFF(esp) |
---|
85 | movl eax, ESP_OFF(esp) |
---|
86 | movl ebp, EBP_OFF(esp) |
---|
87 | |
---|
88 | /* |
---|
89 | * GCC versions starting with 4.3 no longer place the cld |
---|
90 | * instruction before string operations. We need to ensure |
---|
91 | * it is set correctly for ISR handlers. |
---|
92 | */ |
---|
93 | cld |
---|
94 | |
---|
95 | #ifdef __SSE__ |
---|
96 | /* NOTE: SSE only is supported if the BSP enables fxsave/fxrstor |
---|
97 | * to save/restore SSE context! This is so far only implemented |
---|
98 | * for pc386!. |
---|
99 | */ |
---|
100 | |
---|
101 | /* We save SSE here (on the task stack) because we possibly |
---|
102 | * call other C-code (besides the ISR, namely _Thread_Dispatch()) |
---|
103 | */ |
---|
104 | /* don't wait here; a possible exception condition will eventually be |
---|
105 | * detected when the task resumes control and executes a FP instruction |
---|
106 | fwait |
---|
107 | */ |
---|
108 | fxsave SSE_OFF(esp) |
---|
109 | fninit /* clean-slate FPU */ |
---|
110 | movl $0x1f80, ARG_OFF(esp) /* use ARG_OFF as scratch space */ |
---|
111 | ldmxcsr ARG_OFF(esp) /* clean-slate MXCSR */ |
---|
112 | #endif |
---|
113 | |
---|
114 | /* |
---|
115 | * Now switch stacks if necessary |
---|
116 | */ |
---|
117 | |
---|
118 | PUBLIC (ISR_STOP) |
---|
119 | ISR_STOP: |
---|
120 | .check_stack_switch: |
---|
121 | movl esp, ebp /* ebp = previous stack pointer */ |
---|
122 | |
---|
123 | #ifdef RTEMS_SMP |
---|
124 | call SYM(_CPU_SMP_Get_current_processor) |
---|
125 | sall $PER_CPU_CONTROL_SIZE_LOG2, eax |
---|
126 | addl $SYM(_Per_CPU_Information), eax |
---|
127 | movl eax, ebx |
---|
128 | #else |
---|
129 | movl $SYM(_Per_CPU_Information), ebx |
---|
130 | #endif |
---|
131 | |
---|
132 | /* is this the outermost interrupt? */ |
---|
133 | cmpl $0, PER_CPU_ISR_NEST_LEVEL(ebx) |
---|
134 | jne nested /* No, then continue */ |
---|
135 | movl PER_CPU_INTERRUPT_STACK_HIGH(ebx), esp |
---|
136 | |
---|
137 | /* |
---|
138 | * We want to insure that the old stack pointer is in ebp |
---|
139 | * By saving it on every interrupt, all we have to do is |
---|
140 | * movl ebp->esp near the end of every interrupt. |
---|
141 | */ |
---|
142 | |
---|
143 | nested: |
---|
144 | incl PER_CPU_ISR_NEST_LEVEL(ebx) /* one nest level deeper */ |
---|
145 | incl PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(ebx) /* disable |
---|
146 | multitasking */ |
---|
147 | /* |
---|
148 | * ECX is preloaded with the vector number; store as arg |
---|
149 | * on top of stack. Note that _CPU_Interrupt_stack_high |
---|
150 | * was adjusted in _CPU_Interrupt_stack_setup() (score/rtems/cpu.h) |
---|
151 | * to make sure there is space. |
---|
152 | */ |
---|
153 | |
---|
154 | movl ecx, ARG_OFF(esp) /* store vector arg in stack */ |
---|
155 | call BSP_dispatch_isr |
---|
156 | |
---|
157 | movl ARG_OFF(esp), ecx /* grab vector arg from stack */ |
---|
158 | |
---|
159 | /* |
---|
160 | * Restore stack. This moves back to the task stack |
---|
161 | * when all interrupts are unnested. |
---|
162 | */ |
---|
163 | movl ebp, esp |
---|
164 | |
---|
165 | decl PER_CPU_ISR_NEST_LEVEL(ebx) /* one less ISR nest level */ |
---|
166 | /* If interrupts are nested, */ |
---|
167 | /* then dispatching is disabled */ |
---|
168 | |
---|
169 | decl PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(ebx) |
---|
170 | /* unnest multitasking */ |
---|
171 | /* Is dispatch disabled */ |
---|
172 | jne .exit /* Yes, then exit */ |
---|
173 | |
---|
174 | cmpb $0, PER_CPU_DISPATCH_NEEDED(ebx) |
---|
175 | /* Is task switch necessary? */ |
---|
176 | jne .schedule /* Yes, then call the scheduler */ |
---|
177 | jmp .exit /* No, exit */ |
---|
178 | |
---|
179 | .schedule: |
---|
180 | /* |
---|
181 | * the scratch registers have already been saved and we are already |
---|
182 | * back on the thread system stack. So we can call _Thread_Dispatch |
---|
183 | * directly |
---|
184 | */ |
---|
185 | call _Thread_Dispatch |
---|
186 | /* |
---|
187 | * fall through exit to restore complete contex (scratch registers |
---|
188 | * eip, CS, Flags). |
---|
189 | */ |
---|
190 | .exit: |
---|
191 | |
---|
192 | #ifdef __SSE__ |
---|
193 | fwait |
---|
194 | fxrstor SSE_OFF(esp) |
---|
195 | #endif |
---|
196 | |
---|
197 | /* restore ebx, ebp and original esp */ |
---|
198 | addl $EBX_OFF, esp |
---|
199 | popl ebx |
---|
200 | popl ebp |
---|
201 | popl esp |
---|
202 | |
---|
203 | /* |
---|
204 | * BEGINNING OF DE-ESTABLISH SEGMENTS |
---|
205 | * |
---|
206 | * NOTE: Make sure there is code here if code is added to |
---|
207 | * load the segment registers. |
---|
208 | * |
---|
209 | */ |
---|
210 | |
---|
211 | /******* DE-ESTABLISH SEGMENTS CODE GOES HERE ********/ |
---|
212 | |
---|
213 | /* |
---|
214 | * END OF DE-ESTABLISH SEGMENTS |
---|
215 | */ |
---|
216 | popl edx |
---|
217 | popl ecx |
---|
218 | popl eax |
---|
219 | iret |
---|
220 | |
---|
221 | #define DISTINCT_INTERRUPT_ENTRY(_vector) \ |
---|
222 | .p2align 4 ; \ |
---|
223 | PUBLIC (rtems_irq_prologue_ ## _vector ) ; \ |
---|
224 | SYM (rtems_irq_prologue_ ## _vector ): \ |
---|
225 | pushl eax ; \ |
---|
226 | pushl ecx ; \ |
---|
227 | pushl edx ; \ |
---|
228 | movl $ _vector, ecx ; \ |
---|
229 | jmp SYM (_ISR_Handler) ; |
---|
230 | |
---|
231 | DISTINCT_INTERRUPT_ENTRY(0) |
---|
232 | DISTINCT_INTERRUPT_ENTRY(1) |
---|
233 | DISTINCT_INTERRUPT_ENTRY(2) |
---|
234 | DISTINCT_INTERRUPT_ENTRY(3) |
---|
235 | DISTINCT_INTERRUPT_ENTRY(4) |
---|
236 | DISTINCT_INTERRUPT_ENTRY(5) |
---|
237 | DISTINCT_INTERRUPT_ENTRY(6) |
---|
238 | DISTINCT_INTERRUPT_ENTRY(7) |
---|
239 | DISTINCT_INTERRUPT_ENTRY(8) |
---|
240 | DISTINCT_INTERRUPT_ENTRY(9) |
---|
241 | DISTINCT_INTERRUPT_ENTRY(10) |
---|
242 | DISTINCT_INTERRUPT_ENTRY(11) |
---|
243 | DISTINCT_INTERRUPT_ENTRY(12) |
---|
244 | DISTINCT_INTERRUPT_ENTRY(13) |
---|
245 | DISTINCT_INTERRUPT_ENTRY(14) |
---|
246 | DISTINCT_INTERRUPT_ENTRY(15) |
---|
247 | DISTINCT_INTERRUPT_ENTRY(16) |
---|
248 | |
---|
249 | /* |
---|
250 | * routine used to initialize the IDT by default |
---|
251 | */ |
---|
252 | |
---|
253 | PUBLIC (default_raw_idt_handler) |
---|
254 | PUBLIC (raw_idt_notify) |
---|
255 | |
---|
256 | SYM (default_raw_idt_handler): |
---|
257 | pusha |
---|
258 | cld |
---|
259 | mov esp, ebp |
---|
260 | andl $ - CPU_STACK_ALIGNMENT, esp |
---|
261 | call raw_idt_notify |
---|
262 | mov ebp, esp |
---|
263 | popa |
---|
264 | iret |
---|
265 | |
---|
266 | END_CODE |
---|
267 | |
---|
268 | END |
---|