/* cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2007.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 *
 *  Ported to Niagara and UltraSPARC III (US3) implementations of the SPARC-v9.
 *  Niagara and US3 modifications of respective RTEMS file:
 *  COPYRIGHT (c) 2010. Gedare Bloom.
 *
 *  $Id$
 */
---|
27 | |
---|
#include <rtems/asm.h>


/*
 * The assembler needs to be told that we know what to do with
 * the global registers.  (%g2/%g3/%g6/%g7 are "application reserved"
 * in the SPARC ABI; declaring them #scratch suppresses the assembler
 * warning and records that this port uses them freely.)
 */
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
---|
39 | |
---|
40 | |
---|
/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 8 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    g4 = tstate (old l0)
 *    g2 = trap type (vector) (old l3)
 *
 *  NOTE: By an executive defined convention:
 *    if trap type is between 0 and 511 it is an asynchronous trap
 *    if trap type is between 512 and 1023 it is a synchronous trap
 *    (bit 0x200 of the vector number is the software synchronous-trap
 *    indicator; see SPARC_SYNCHRONOUS_TRAP_BIT_MASK at the bottom)
 */

    .align 4
    PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):

    /*
     * The ISR is called at TL = 1.
     * On sun4u we use the alternate globals set.
     *
     * On entry:
     *   g4 = tstate (from trap table)
     *   g2 = trap vector #
     *
     * In either case, note that trap handlers share a register window with
     * the interrupted context, unless we explicitly enter a new window. This
     * differs from Sparc v8, in which a dedicated register window is saved
     * for trap handling. This means we have to avoid overwriting any
     * registers that we don't save.
     */

    /*
     * Save the state of the interrupted task -- especially the global
     * registers -- in the Interrupt Stack Frame.  Note that the ISF
     * includes a regular minimum stack frame which will be used if
     * needed by register window overflow and underflow handlers.
     *
     * This is slightly wasteful, since the stack already has the window
     * overflow space reserved, but there is no obvious way to ensure
     * we can store the interrupted state and still handle window
     * spill/fill correctly, since there is no room for the ISF.
     */

    /* Debug sanity check: this handler must be entered at TL = 1.
     * If not, spin forever so the fault is visible in a debugger
     * instead of silently corrupting state. */
    rdpr    %tl, %g1
    cmp     %g1, 1
    be      1f
    nop

0:  ba      0b                          ! TL != 1: hang here (debug aid)
    nop

1:
    /* First stash the interrupted task's sp temporarily in g1,
     * before we carve the ISF out of its stack. */
    mov     %sp, %g1

    sub     %sp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                        ! make space for Stack_Frame||ISF

    /* Save tstate, tpc, tnpc, pil, and the trap vector number.
     * These must be captured before TL is lowered below. */
    stx     %g4, [%sp + STACK_BIAS + ISF_TSTATE_OFFSET]
    rdpr    %pil, %g3
    rdpr    %tpc, %g4
    rdpr    %tnpc, %g5
    stx     %g3, [%sp + STACK_BIAS + ISF_PIL_OFFSET]
    stx     %g4, [%sp + STACK_BIAS + ISF_TPC_OFFSET]
    stx     %g5, [%sp + STACK_BIAS + ISF_TNPC_OFFSET]
    stx     %g2, [%sp + STACK_BIAS + ISF_TVEC_NUM]

    rd      %y, %g4                     ! save y
    stx     %g4, [%sp + STACK_BIAS + ISF_Y_OFFSET]

    ! save interrupted frame's output regs
    stx     %o0, [%sp + STACK_BIAS + ISF_O0_OFFSET]    ! save o0
    stx     %o1, [%sp + STACK_BIAS + ISF_O1_OFFSET]    ! save o1
    stx     %o2, [%sp + STACK_BIAS + ISF_O2_OFFSET]    ! save o2
    stx     %o3, [%sp + STACK_BIAS + ISF_O3_OFFSET]    ! save o3
    stx     %o4, [%sp + STACK_BIAS + ISF_O4_OFFSET]    ! save o4
    stx     %o5, [%sp + STACK_BIAS + ISF_O5_OFFSET]    ! save o5
    stx     %g1, [%sp + STACK_BIAS + ISF_O6_SP_OFFSET] ! save o6/sp (pre-ISF value)
    stx     %o7, [%sp + STACK_BIAS + ISF_O7_OFFSET]    ! save o7

    mov     %g1, %o5                    ! hold the old sp here for now
    mov     %g2, %o1                    ! we'll need trap # later
                                        ! (o1/o5 survive the globals switch below)

    /* switch to TL[0]; tstate/tpc/tnpc are already saved above, so the
     * trap-level registers may be clobbered from here on */
    wrpr    %g0, 0, %tl

    /* switch to normal globals */
#if defined (SUN4U)
    /* the assignment to pstate below will mask out the AG bit */
#elif defined (SUN4V)
    wrpr    %g0, 0, %gl
#endif
    /* get pstate to known state (privileged, FPU enabled, traps disabled) */
    wrpr    %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK, %pstate

    ! save the interrupted context's (normal) globals
    stx     %g1, [%sp + STACK_BIAS + ISF_G1_OFFSET]    ! save g1
    stx     %g2, [%sp + STACK_BIAS + ISF_G2_OFFSET]    ! save g2
    stx     %g3, [%sp + STACK_BIAS + ISF_G3_OFFSET]    ! save g3
    stx     %g4, [%sp + STACK_BIAS + ISF_G4_OFFSET]    ! save g4
    stx     %g5, [%sp + STACK_BIAS + ISF_G5_OFFSET]    ! save g5
    stx     %g6, [%sp + STACK_BIAS + ISF_G6_OFFSET]    ! save g6
    stx     %g7, [%sp + STACK_BIAS + ISF_G7_OFFSET]    ! save g7


    mov     %o1, %g2                    ! get the trap # back into g2
    mov     %o5, %g7                    ! store the interrupted %sp (preserve)
    mov     %sp, %o1                    ! 2nd arg to ISR Handler = address of ISF

    /*
     * Increment ISR nest level and Thread dispatch disable level.
     *
     * Register usage for this section: (note, these are used later)
     *
     *   g3 = _Thread_Dispatch_disable_level pointer
     *   g5 = _Thread_Dispatch_disable_level value (uint32_t)
     *   g6 = _ISR_Nest_level pointer
     *   g4 = _ISR_Nest_level value (uint32_t)
     *   o5 = temp
     *
     * NOTE: It is assumed that g6 - g7 will be preserved until the ISR
     *       nest and thread dispatch disable levels are unnested.
     */

    setx    SYM(_Thread_Dispatch_disable_level), %o5, %g3
    lduw    [%g3], %g5
    setx    SYM(_ISR_Nest_level), %o5, %g6
    lduw    [%g6], %g4

    add     %g5, 1, %g5
    stuw    %g5, [%g3]

    add     %g4, 1, %g4
    stuw    %g4, [%g6]

    /*
     * If ISR nest level was zero (now 1), then switch to the dedicated
     * CPU interrupt stack.
     */

    subcc   %g4, 1, %g4                 ! outermost interrupt handler?
    bnz     dont_switch_stacks          ! No, then do not switch stacks
    ! NOTE(review): no explicit delay-slot instruction here; the first
    ! instruction of the setx expansion below executes in the delay slot
    ! on both paths.  Harmless only while %g1/%o5 are scratch -- confirm.

    setx    SYM(_CPU_Interrupt_stack_high), %o5, %g1
    ldx     [%g1], %sp

    /*
     * Adjust the stack for the stack bias
     */
    sub     %sp, STACK_BIAS, %sp

    /*
     * Make sure we have a place on the stack for the window overflow
     * trap handler to write into.  At this point it is safe to
     * enable traps again.
     */

    sub     %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

dont_switch_stacks:
    /*
     * Check if we have an external interrupt (trap 0x41 - 0x4f).  If so,
     * set the PIL to mask off interrupts with lower priority.
     *
     * The original PIL is not modified since it will be restored
     * when the interrupt handler returns.
     */

    and     %g2, 0x0ff, %g1             ! is bottom byte of vector number [0x41,0x4f]?

    subcc   %g1, 0x41, %g0
    bl      dont_fix_pil
    subcc   %g1, 0x4f, %g0              ! delay slot: executes on both paths (harmless)
    bg      dont_fix_pil
    nop
    wrpr    %g0, %g1, %pil              ! PIL = low byte of vector (0x41..0x4f)

dont_fix_pil:
    /* We need to be careful about enabling traps here.
     *
     * We already stored off the tstate, tpc, and tnpc, and switched to
     * TL = 0, so it should be safe.
     */

    /* zero out g4 so that ofw calls work */
    mov     %g0, %g4

    ! **** ENABLE TRAPS ****
    wrpr    %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
        SPARC_PSTATE_IE_MASK, %pstate

    /*
     * Vector to user's handler.
     *
     * NOTE: TBR may no longer have vector number in it since
     *       we just enabled traps.  It is definitely in g2.
     */
    setx    SYM(_ISR_Vector_table), %o5, %g1
    ldx     [%g1], %g1                  ! g1 = base of the vector table
    and     %g2, 0x1FF, %o5             ! remove synchronous trap indicator
    sll     %o5, 3, %o5                 ! o5 = offset into table (8 bytes/entry)
    ldx     [%g1 + %o5], %g1            ! g1 = _ISR_Vector_table[ vector ]


    ! o1 = 2nd arg = address of the ISF
    !      WAS LOADED WHEN ISF WAS SAVED!!!
    mov     %g2, %o0                    ! o0 = 1st arg = vector number
    call    %g1, 0
    nop                                 ! delay slot

    /*
     * Redisable traps so we can finish up the interrupt processing.
     * This is a conservative place to do this.
     */
    ! **** DISABLE TRAPS ****
    wrpr    %g0, SPARC_PSTATE_PRIV_MASK, %pstate

    /*
     * We may safely use any of the %o and %g registers, because
     * we saved them earlier (and any other interrupt that uses
     * them will also save them).  Right now, the state of those
     * registers is as follows:
     *   %o registers: unknown (user's handler may have destroyed)
     *   %g1,g4,g5: scratch
     *   %g2: unknown: was trap vector
     *   %g3: unknown: was _Thread_Dispatch_disable_level pointer
     *   %g6: _ISR_Nest_level pointer (still valid)
     *   %g7: interrupted task's sp (still valid)
     */

    /*
     * Decrement ISR nest level and Thread dispatch disable level
     * (undoing the increments performed on entry above).
     *
     * Register usage for this section: (note: as used above)
     *
     *   g3 = _Thread_Dispatch_disable_level pointer
     *   g5 = _Thread_Dispatch_disable_level value
     *   g6 = _ISR_Nest_level pointer
     *   g4 = _ISR_Nest_level value
     *   o5 = temp
     */

    /* We have to re-load the values from memory, because there are
     * not enough registers that we know will be preserved across the
     * user's handler.  If this is a problem, we can create a register
     * window for _ISR_Handler.
     */

    setx    SYM(_Thread_Dispatch_disable_level), %o5, %g3
    lduw    [%g3], %g5
    lduw    [%g6], %g4                  ! g6 still points at _ISR_Nest_level
    sub     %g5, 1, %g5
    stuw    %g5, [%g3]
    sub     %g4, 1, %g4
    stuw    %g4, [%g6]

    orcc    %g4, %g0, %g0               ! ISRs still nested?
    bnz     dont_restore_stack          ! Yes then don't restore stack yet
    nop

    /*
     * This is the outermost interrupt handler.  Need to get off the
     * CPU Interrupt Stack and back to the task's stack.
     *
     * The following subtract should get us back on the interrupted
     * task's stack and add enough room to invoke the dispatcher.
     * When we enable traps, we are mostly back in the context
     * of the task and subsequent interrupts can operate normally.
     *
     * Now %sp points to the bottom of the ISF.
     */

    sub     %g7, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp

dont_restore_stack:

    /*
     * If dispatching is disabled (includes nested interrupt case),
     * then do a "simple" exit.
     */

    orcc    %g5, %g0, %g0               ! Is dispatching disabled?
    bnz     simple_return               ! Yes, then do a "simple" exit
                                        ! NOTE: Use the delay slot
    mov     %g0, %g4                    ! clear g4 for ofw

    ! Are we dispatching from a previous ISR in the interrupted thread?
    setx    SYM(_CPU_ISR_Dispatch_disable), %o5, %g5
    lduw    [%g5], %o5
    orcc    %o5, %g0, %g0               ! Is this thread already doing an ISR?
    bnz     simple_return               ! Yes, then do a "simple" exit
    nop

    setx    SYM(_Context_Switch_necessary), %o5, %g7
                                        ! g7 no longer needed as saved sp here:
                                        ! sp was already restored above

    /*
     * If a context switch is necessary, then do fudge stack to
     * return to the interrupt dispatcher.
     */

    ldub    [%g7], %o5

    orcc    %o5, %g0, %g0               ! Is thread switch necessary?
    bnz     SYM(_ISR_Dispatch)          ! yes, then invoke the dispatcher
    nop

    /*
     * Finally, check to see if signals were sent to the currently
     * executing task.  If so, we need to invoke the interrupt dispatcher.
     */
    setx    SYM(_ISR_Signals_to_thread_executing), %o5, %g5
    ldub    [%g5], %o5

    orcc    %o5, %g0, %g0               ! Were signals sent to the currently
                                        ! executing thread?
    bz      simple_return               ! No, then do a "simple" exit
                                        ! Delay slot clears the signals flag;
                                        ! it executes on both paths, which is
                                        ! harmless because the flag is already
                                        ! zero when the branch is taken
    stb     %g0, [%g5]


    /*
     * Invoke interrupt dispatcher.
     */
PUBLIC(_ISR_Dispatch)
SYM(_ISR_Dispatch):
    ! Set ISR dispatch nesting prevention flag
    mov     1, %o1
    setx    SYM(_CPU_ISR_Dispatch_disable), %o5, %o2
    stuw    %o1, [%o2]


    ! **** ENABLE TRAPS ****
    wrpr    %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
        SPARC_PSTATE_IE_MASK, %pstate
isr_dispatch:
    call    SYM(_Thread_Dispatch), 0
    nop

    /*
     * We invoked _Thread_Dispatch in a state similar to the interrupted
     * task.  In order to safely be able to tinker with the register
     * windows and get the task back to its pre-interrupt state,
     * we need to disable interrupts.
     */
    mov     2, %g4                      ! syscall (disable interrupts)
    ta      0                           ! syscall (disable interrupts)
    mov     0, %g4                      ! re-clear g4 (ofw convention)

    /*
     * While we had ISR dispatching disabled in this thread,
     * did we miss anything?  If so, then we need to do another
     * _Thread_Dispatch before leaving this ISR Dispatch context.
     */

    setx    SYM(_Context_Switch_necessary), %o5, %o1
    ldub    [%o1], %o2

    ! NOTE: Use some of delay slot to start loading this
    setx    SYM(_ISR_Signals_to_thread_executing), %o5, %o1
    ldub    [%o1], %o3

    orcc    %o2, %g0, %g0               ! Is thread switch necessary?
    bnz     dispatchAgain               ! yes, then invoke the dispatcher AGAIN
                                        ! NOTE: Use the delay slot to catch the orcc below

    /*
     * Finally, check to see if signals were sent to the currently
     * executing task.  If so, we need to invoke the interrupt dispatcher.
     */

    ! NOTE: Delay slots above were used to perform the load AND
    !       this orcc falls into the delay slot for bnz above
    orcc    %o3, %g0, %g0               ! Were signals sent to the currently
                                        ! executing thread?
    bz      allow_nest_again            ! No, then clear out and return
    nop

    ! Yes, then invoke the dispatcher
dispatchAgain:
    mov     3, %g4                      ! syscall (enable interrupts)
    ta      0                           ! syscall (enable interrupts)
    ba      isr_dispatch
    mov     0, %g4                      ! delay slot: re-clear g4 (ofw convention)

allow_nest_again:

    ! Zero out ISR stack nesting prevention flag
    setx    SYM(_CPU_ISR_Dispatch_disable), %o5, %o1
    stuw    %g0, [%o1]

    /*
     * The CWP in place at this point may be different from
     * that which was in effect at the beginning of the ISR if we
     * have been context switched between the beginning of this invocation
     * of _ISR_Handler and this point.  Thus the CWP and WIM should
     * not be changed back to their values at ISR entry time.  Any
     * changes to the PSR must preserve the CWP.
     */

simple_return:
    flushw                              ! get register windows to a 'clean' state

    ! **** DISABLE TRAPS ****
    wrpr    %g0, SPARC_PSTATE_PRIV_MASK, %pstate

    ldx     [%sp + STACK_BIAS + ISF_Y_OFFSET], %o1     ! restore y
    wr      %o1, 0, %y

    ldx     [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1

    ! see if cwp is proper (tstate.cwp == cwp)
    and     %g1, 0x1F, %g6
    rdpr    %cwp, %g7
    cmp     %g6, %g7
    bz      good_window
    nop

    /*
     * Fix the CWP.  Need the cwp to be the proper cwp that
     * gets restored when returning from the trap via retry/done.  Do
     * this before reloading the task's output regs.  Basically fake a
     * window spill/fill.
     *
     * Is this necessary on sun4v?  Why not just re-write
     * tstate.cwp to be equal to the current cwp?
     */
    mov     %sp, %g1                    ! g1 = sp, carried across the cwp change
    stx     %l0, [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET]
    stx     %l1, [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET]
    stx     %l2, [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET]
    stx     %l3, [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET]
    stx     %l4, [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET]
    stx     %l5, [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET]
    stx     %l6, [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET]
    stx     %l7, [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET]
    stx     %i0, [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET]
    stx     %i1, [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET]
    stx     %i2, [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET]
    stx     %i3, [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET]
    stx     %i4, [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET]
    stx     %i5, [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET]
    stx     %i6, [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET]
    stx     %i7, [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET]
    wrpr    %g0, %g6, %cwp              ! switch to tstate's cwp
    mov     %g1, %sp                    ! re-establish sp in the new window
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET], %l0
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET], %l1
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET], %l2
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET], %l3
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET], %l4
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET], %l5
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET], %l6
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET], %l7
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET], %i0
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET], %i1
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET], %i2
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET], %i3
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET], %i4
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET], %i5
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
    ldx     [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET], %i7


good_window:


    /*
     * Restore task's global and out registers
     */

    ldx     [%sp + STACK_BIAS + ISF_G1_OFFSET], %g1    ! restore g1
    ldx     [%sp + STACK_BIAS + ISF_G2_OFFSET], %g2    ! restore g2
    ldx     [%sp + STACK_BIAS + ISF_G3_OFFSET], %g3    ! restore g3
    ldx     [%sp + STACK_BIAS + ISF_G4_OFFSET], %g4    ! restore g4
    ldx     [%sp + STACK_BIAS + ISF_G5_OFFSET], %g5    ! restore g5
    ldx     [%sp + STACK_BIAS + ISF_G6_OFFSET], %g6    ! restore g6
    ldx     [%sp + STACK_BIAS + ISF_G7_OFFSET], %g7    ! restore g7



    ! return to TL[1], GL[1], and restore TSTATE, TPC, and TNPC
    wrpr    %g0, 1, %tl

    ! return to GL=1 or AG
#if defined(SUN4U)
    rdpr    %pstate, %g1
    andn    %g1, SPARC_PSTATE_AG_MASK, %g1
    wrpr    %g1, %g0, %pstate           ! go to regular global
#elif defined(SUN4V)
    wrpr    %g0, 1, %gl
#endif

    ! now we can use global registers (at gl=1 or AG)
    ldx     [%sp + STACK_BIAS + ISF_PIL_OFFSET], %g3
    ldx     [%sp + STACK_BIAS + ISF_TPC_OFFSET], %g4
    ldx     [%sp + STACK_BIAS + ISF_TNPC_OFFSET], %g5
    ldx     [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1
    ldx     [%sp + STACK_BIAS + ISF_TVEC_NUM], %g2
    wrpr    %g0, %g3, %pil
    wrpr    %g0, %g4, %tpc
    wrpr    %g0, %g5, %tnpc

    wrpr    %g0, %g1, %tstate

    ldx     [%sp + STACK_BIAS + ISF_O0_OFFSET], %o0    ! restore o0
    ldx     [%sp + STACK_BIAS + ISF_O1_OFFSET], %o1    ! restore o1
    ldx     [%sp + STACK_BIAS + ISF_O2_OFFSET], %o2    ! restore o2
    ldx     [%sp + STACK_BIAS + ISF_O3_OFFSET], %o3    ! restore o3
    ldx     [%sp + STACK_BIAS + ISF_O4_OFFSET], %o4    ! restore o4
    ldx     [%sp + STACK_BIAS + ISF_O5_OFFSET], %o5    ! restore o5
                                        ! sp is restored last, below
    ldx     [%sp + STACK_BIAS + ISF_O7_OFFSET], %o7    ! restore o7

    ldx     [%sp + STACK_BIAS + ISF_O6_SP_OFFSET], %o6 ! restore o6/sp

    /*
     * Determine whether to re-execute the trapping instruction
     * via retry (resume at saved TPC) or to skip past it via done
     * (resume at saved TNPC), based on the software synchronous-trap
     * indicator bit in the saved vector number.
     *
     * NOTE(review): as written, asynchronous traps (bit clear) take the
     * `done` path and synchronous traps take `retry` -- confirm this
     * matches how the trap table sets up TPC/TNPC for each trap class.
     */

    andcc   %g2, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                        ! Is this a synchronous trap?
    be      not_synch                   ! No (asynchronous): take the done path
    mov     0, %g4                      ! delay slot: executes on both paths
    retry                               ! re-execute trapping instruction
not_synch:
    done                                ! skip trapping instruction

/* end of file */
---|