/* cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 *
 *  $Id$
 */

#include <rtems/asm.h>
#include <rtems/system.h>
#include <bspopts.h>

/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR   (processor state register at trap time)
 *    l1 = PC    (trapped program counter)
 *    l2 = nPC   (trapped next program counter)
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, trap type is between 0 and 255
 *        if it is an asynchronous trap and 256 and 511 if it is synchronous.
 *        (The synchronous indicator is SPARC_SYNCHRONOUS_TRAP_BIT_MASK.)
 */

        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps.
         *
         *  For a synchronous trap the trapped instruction must NOT be
         *  re-executed, so return to nPC / nPC+4 instead of PC / nPC.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                        ! Is this a synchronous trap?
        be,a    win_ovflow              ! No, then skip the adjustment
        nop                             ! DELAY (annulled when not taken)
        mov     %l1, %l6                ! save trapped pc for debug info
        mov     %l2, %l1                ! do not return to the instruction
        add     %l2, 4, %l2             !   indicated

win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow,
         *  i.e. flush the window about to be trashed to the stack.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5           ! g5 = win >> cwp ; shift count and CWP
                                        !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                  ! Is this an invalid window?
        bne     dont_do_the_window      ! No, then skip all this stuff
        ! we are using the delay slot (the srl below is harmless either way)

        /*
         *  The following is same as a 1 position right rotate of WIM
         */

        srl     %g4, 1, %g5             ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
                                        ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4           ! g4 = (WIM >> 1) |
                                        !      (WIM << (Number Windows - 1))

        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global
         *  registers.
         */

        save                            ! Save into the window
        wr      %g4, 0, %wim            ! WIM = new WIM
        nop                             ! delay slots (3 after wr %wim)
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore                         ! back to the trap window
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved directly from %l4 and
         *  %l5 directly into the ISF below.
         */

save_isf:

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                        ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]     ! save psr, PC
        st      %l2, [%sp + ISF_NPC_OFFSET]     ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]      ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]      ! save g2, g3
        std     %l4, [%sp + ISF_G4_OFFSET]      ! save g4, g5 -- see above
        std     %g6, [%sp + ISF_G6_OFFSET]      ! save g6, g7

        std     %i0, [%sp + ISF_I0_OFFSET]      ! save i0, i1
        std     %i2, [%sp + ISF_I2_OFFSET]      ! save i2, i3
        std     %i4, [%sp + ISF_I4_OFFSET]      ! save i4, i5
        std     %i6, [%sp + ISF_I6_FP_OFFSET]   ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]       ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]     ! save real trapped pc

        mov     %sp, %o1                        ! 2nd arg to ISR Handler

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f).  If so,
         *  set the PIL in the %psr to mask off interrupts with lower priority.
         *  The original %psr in %l0 is not modified since it will be restored
         *  when the interrupt handler returns.
         *
         *  Register usage here: g4 = vector number, g5 = PSR to write.
         */

        mov     %l0, %g5
        and     %l3, 0x0ff, %g4

/* This is a fix for ERC32 with FPU rev.B or rev.C */

#if defined(FPU_REVB)


        subcc   %g4, 0x08, %g0          ! FPU exception trap?
        be      fpu_revb
        subcc   %g4, 0x11, %g0          ! below external interrupt range?
        bl      dont_fix_pil
        subcc   %g4, 0x1f, %g0          ! above external interrupt range?
        bg      dont_fix_pil
        sll     %g4, 8, %g4             ! move vector into PIL field
        and     %g4, SPARC_PSR_PIL_MASK, %g4
        andn    %l0, SPARC_PSR_PIL_MASK, %g5
        or      %g4, %g5, %g5           ! g5 = PSR with new PIL
        srl     %l0, 12, %g4            ! extract EF bit from %psr
        andcc   %g4, 1, %g0
        be      dont_fix_pil            ! FPU disabled -> no workaround needed
        nop
        ba,a    enable_irq


fpu_revb:
        srl     %l0, 12, %g4            ! check if EF is set in %psr
        andcc   %g4, 1, %g0
        be      dont_fix_pil            ! if FPU disabled then continue as normal
        and     %l3, 0xff, %g4
        subcc   %g4, 0x08, %g0
        bne     enable_irq              ! if not a FPU exception then do two fmovs
        set     __sparc_fq, %g4
        st      %fsr, [%g4]             ! if FQ is not empty and FQ[1] = fmovs
        ld      [%g4], %g4              !   then this is bug 3.14
        srl     %g4, 13, %g4            ! FSR qne bit
        andcc   %g4, 1, %g0
        be      dont_fix_pil
        set     __sparc_fq, %g4
        std     %fq, [%g4]              ! dequeue FQ entry
        ld      [%g4+4], %g4            ! g4 = queued instruction
        set     0x81a00020, %g5         ! opcode of "fmovs %f0, %f0"
        subcc   %g4, %g5, %g0
        bne,a   dont_fix_pil2
        wr      %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        ba,a    simple_return           ! spurious trap from the fix itself

enable_irq:
        or      %g5, SPARC_PSR_PIL_MASK, %g4    ! mask all interrupts
        wr      %g4, SPARC_PSR_ET_MASK, %psr    ! **** ENABLE TRAPS ****
        nop; nop; nop                           ! PSR write delay
        fmovs   %f0, %f0                ! two fmovs flush the erratum state
        ba      dont_fix_pil
        fmovs   %f0, %f0                ! DELAY (executed before the branch target)

        .data
        .global __sparc_fq
        .align 8
__sparc_fq:                             ! scratch buffer for %fsr / %fq dump
        .word 0,0

        .text
/* end of ERC32 FPU rev.B/C fix */

#else

        subcc   %g4, 0x11, %g0          ! below external interrupt range?
        bl      dont_fix_pil
        subcc   %g4, 0x1f, %g0          ! above external interrupt range?
        bg      dont_fix_pil
        sll     %g4, 8, %g4             ! move vector into PIL field
        and     %g4, SPARC_PSR_PIL_MASK, %g4
        andn    %l0, SPARC_PSR_PIL_MASK, %g5
        ba      pil_fixed
        or      %g4, %g5, %g5           ! DELAY: g5 = PSR with new PIL
#endif

dont_fix_pil:
        or      %g5, SPARC_PSR_PIL_MASK, %g5    ! not an external IRQ: mask all
pil_fixed:
        wr      %g5, SPARC_PSR_ET_MASK, %psr    ! **** ENABLE TRAPS ****
dont_fix_pil2:

        PUBLIC(_ISR_PER_CPU)
SYM(_ISR_PER_CPU):

#if defined(RTEMS_SMP)
        sethi    %hi(_Per_CPU_Information_p), %l5
        add      %l5, %lo(_Per_CPU_Information_p), %l5
#if BSP_LEON3_SMP
        /* LEON3 SMP support */
        rd       %asr17, %l7
        srl      %l7, 28, %l7   /* CPU number is upper 4 bits so shift */
        sll      %l7, 2, %l7    /* l7 = offset */
        add      %l5, %l7, %l5
#endif
        ld       [%l5], %l5     /* l5 = pointer to per CPU */
        nop
        nop

        /*
         *  On multi-core system, we need to use SMP safe versions
         *  of ISR and Thread Dispatch critical sections.
         *
         *  _ISR_SMP_Enter returns the interrupt nest level.  If we are
         *  outermost interrupt, then we need to switch stacks.
         */
        mov      %sp, %fp
        call     SYM(_ISR_SMP_Enter), 0
        nop                             ! delay slot
        cmp      %o0, 0
#else
        /*
         *  On single core system, we can directly use variables.
         *
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */
        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6

        sethi    %hi(_Per_CPU_Information), %l5
        add      %l5, %lo(_Per_CPU_Information), %l5

        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7

        add      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        add      %l7, 1, %l7
        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        /*
         *  If ISR nest level was zero (now 1), then switch stack.
         */
        mov      %sp, %fp
        subcc    %l7, 1, %l7            ! outermost interrupt handler?
#endif

        /*
         *  Do we need to switch to the interrupt stack?
         *  NOTE: the ld below is the delay slot; it only takes effect on
         *        the fall-through (outermost) path since it writes %sp.
         */
        bnz      dont_switch_stacks     ! No, then do not switch stacks
        ld       [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Vector to user's handler.
         *
         *  NOTE: TBR may no longer have vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        ld       [%g4+%lo(SYM(_ISR_Vector_table))], %g4
        and      %l3, 0xFF, %g5        ! remove synchronous trap indicator
        sll      %g5, 2, %g5           ! g5 = offset into table
        ld       [%g4 + %g5], %g4      ! g4 = _ISR_Vector_table[ vector ]


                                       ! o1 = 2nd arg = address of the ISF
                                       !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0              ! o0 = 1st arg = vector number
        call     %g4, 0                ! call user ISR handler
        nop                            ! delay slot

#if defined(RTEMS_SMP)
        call     SYM(_ISR_SMP_Exit), 0
        nop                            ! delay slot
        cmp      %o0, 0
        bz       simple_return         ! no dispatch needed -> plain exit
#else
        !sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
        !ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
        !ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
#endif

        /*
         *  Redisable traps so we can finish up the interrupt processing.
         *  This is a VERY conservative place to do this.
         *
         *  NOTE: %l0 has the PSR which was in place when we took the trap.
         */

        mov      %l0, %psr             ! **** DISABLE TRAPS ****
        nop; nop; nop                  ! PSR write delay

#if !defined(RTEMS_SMP)
        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: l7 already holds the decremented nest level (see the
         *        subcc used for the stack-switch test above).
         */

        sub      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        /*
         *  If dispatching is disabled (includes nested interrupt case),
         *  then do a "simple" exit.
         */

        orcc     %l6, %g0, %g0         ! Is dispatching disabled?
        bnz      simple_return         ! Yes, then do a "simple" exit
        ! NOTE: Use the delay slot
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l6

        ! Are we dispatching from a previous ISR in the interrupted thread?
        ld       [%l6 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %l7
        orcc     %l7, %g0, %g0         ! Is this thread already doing an ISR?
        bnz      simple_return         ! Yes, then do a "simple" exit
        nop

        /*
         *  If a context switch is necessary, then fudge the stack to
         *  return to the interrupt dispatcher.
         */

        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l5
        nop
        nop

        orcc     %l5, %g0, %g0         ! Is thread switch necessary?
        bz       simple_return         ! No, then return
#endif
        /*
         *  Invoke interrupt dispatcher.
         */

        PUBLIC(_ISR_Dispatch)
SYM(_ISR_Dispatch):
        ! Set ISR dispatch nesting prevention flag
        mov      1,%l6
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
        st       %l6,[%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        /*
         *  The following subtract should get us back on the interrupted
         *  tasks stack and add enough room to invoke the dispatcher.
         *  When we enable traps, we are mostly back in the context
         *  of the task and subsequent interrupts can operate normally.
         */

        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        or       %l0, SPARC_PSR_ET_MASK, %l7   ! l7 = PSR with ET=1
        mov      %l7, %psr                     ! **** ENABLE TRAPS ****
        nop                                    ! PSR write delay
        nop
        nop
isr_dispatch:
        call     SYM(_Thread_Dispatch), 0
        nop                                    ! delay slot

        /*
         *  We invoked _Thread_Dispatch in a state similar to the interrupted
         *  task.  In order to safely be able to tinker with the register
         *  windows and get the task back to its pre-interrupt state,
         *  we need interrupts disabled so we can safely tinker
         *  with the register windowing.  In particular, the CWP in the PSR
         *  is fragile during this period.  (See PR578.)
         */
        mov      2,%g1                 ! syscall (disable interrupts)
        ta       0                     ! syscall (disable interrupts)

        /*
         *  While we had ISR dispatching disabled in this thread,
         *  did we miss anything?  If so, then we need to do another
         *  _Thread_Dispatch before leaving this ISR Dispatch context.
         */

#if defined(RTEMS_SMP)
        sethi    %hi(_Per_CPU_Information_p), %l5
        ld       [%l5 + %lo(_Per_CPU_Information_p)], %l5
#if BSP_LEON3_SMP
        /* LEON3 SMP support */
        rd       %asr17, %l7
        srl      %l7, 28, %l7   /* CPU number is upper 4 bits so shift */
        sll      %l7, 2, %l7    /* l7 = offset */
        add      %l5, %l7, %l5
#else
        nop
        nop
#endif
        ld       [%l5], %l5     /* l5 = pointer to per CPU */
        nop
        nop
#else
        sethi    %hi(_Per_CPU_Information), %l5
        add      %l5, %lo(_Per_CPU_Information), %l5
#endif
        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l5
        nop
        nop

        orcc     %l5, %g0, %g0         ! Is thread switch necessary?
        bz       allow_nest_again
        nop

        ! Yes, then invoke the dispatcher
dispatchAgain:
        mov      3,%g1                 ! syscall (enable interrupts)
        ta       0                     ! syscall (enable interrupts)
        ba       isr_dispatch
        nop                            ! delay slot

allow_nest_again:

        ! Zero out ISR stack nesting prevention flag
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
        st       %g0,[%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0    ! ...with traps disabled

        /*
         *  Restore tasks global and out registers
         */

        mov    %fp, %g1                        ! g1 = ISF base (survives restore)

                                               ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2     ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4     ! restore g4, g5
        ldd     [%fp + ISF_G6_OFFSET], %g6     ! restore g6, g7

        ldd     [%fp + ISF_I0_OFFSET], %i0     ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2     ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4     ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6  ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and thus are off limits.
         *
         *   The following is the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  if (CWP + 1) is an invalid window then we need to reload it.
         *
         *  WARNING: Traps should now be disabled
         */

        mov     %l0, %psr                  ! **** DISABLE TRAPS ****
        nop                                ! PSR write delay
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = win >> cwp + 1 ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is tasks window invalid?
        bne     good_task_window
        ! the sll below is the delay slot; harmless when the branch is taken

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the tasks window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! reload of sp clobbers ISF
        save                               ! Back to ISR dispatch window

good_task_window:

        mov     %l0, %psr                  ! **** DISABLE TRAPS ****
        nop; nop; nop                      ! PSR write delay
                                           !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        jmp     %l1                        ! transfer control and
        rett    %l2                        ! go back to tasks window

/* end of file */