/* cpu_asm.s
 *
 * This file contains the basic algorithms for all assembly code used
 * in a specific CPU port of RTEMS. These algorithms must be implemented
 * in assembly language.
 *
 * COPYRIGHT (c) 1989-2007. On-Line Applications Research Corporation (OAR).
 * COPYRIGHT (c) 2010. Gedare Bloom.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#include <rtems/asm.h>
#include <rtems/score/percpu.h>
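
/*
 * The per-CPU symbols referenced below (THREAD_DISPATCH_DISABLE_LEVEL,
 * ISR_NEST_LEVEL, DISPATCH_NEEDED) are expected to come from
 * <rtems/score/percpu.h>; this is inferred from the include above and
 * from how the symbols are used later in this file.
 */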

/*
 * The assembler needs to be told that we know what to do with
 * the global registers.
 */
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch


/*
 * void _ISR_Handler()
 *
 * This routine provides the RTEMS interrupt management.
 *
 * We enter this handler from the 8 instructions in the trap table with
 * the following registers assumed to be set as shown:
 *
 *   g4 = tstate (old l0)
 *   g2 = trap type (vector) (old l3)
 *
 * NOTE: By an executive defined convention:
 *   if trap type is between 0 and 511 it is an asynchronous trap
 *   if trap type is between 512 and 1023 it is a synchronous trap
 */
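
/*
 * The synchronous/asynchronous distinction is presumed to be carried in
 * bit 9 of the trap type: later in this file the vector is masked with
 * 0x1FF before indexing _ISR_Vector_table and is tested against
 * SPARC_SYNCHRONOUS_TRAP_BIT_MASK when choosing between retry and done.
 */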

  .align 4
  PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):

  /*
   * The ISR is called at TL = 1.
   * On sun4u we use the alternate globals set.
   *
   * On entry:
   *   g4 = tstate (from trap table)
   *   g2 = trap vector #
   *
   * In either case, note that trap handlers share a register window with
   * the interrupted context, unless we explicitly enter a new window. This
   * differs from SPARC V8, in which a dedicated register window is saved
   * for trap handling. This means we have to avoid overwriting any registers
   * that we don't save.
   */


  /*
   * save some or all context on stack
   */

  /*
   * Save the state of the interrupted task -- especially the global
   * registers -- in the Interrupt Stack Frame. Note that the ISF
   * includes a regular minimum stack frame which will be used if
   * needed by register window overflow and underflow handlers.
   *
   * This is slightly wasteful, since the stack already has the window
   * overflow space reserved, but there is no obvious way to ensure
   * we can store the interrupted state and still handle window
   * spill/fill correctly, since there is no room for the ISF.
   */
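
  /*
   * For reference, the fields this handler stores into the ISF (via the
   * ISF_*_OFFSET constants) are: tstate, tpc, tnpc, pil, the trap vector,
   * y, the output registers o0-o7 (o6 being the interrupted sp), and the
   * global registers g1-g7. The actual offsets and layout are defined
   * elsewhere in the port, not in this file.
   */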
82 | |
---|
83 | /* this is for debugging purposes, make sure that TL = 1, otherwise |
---|
84 | * things might get dicey */ |
---|
85 | rdpr %tl, %g1 |
---|
86 | cmp %g1, 1 |
---|
87 | be 1f |
---|
88 | nop |
---|
89 | |
---|
90 | 0: ba 0b |
---|
91 | nop |
---|
92 | |
---|
93 | 1: |
---|
94 | /* first store the sp of the interrupted task temporarily in g1 */ |
---|
95 | mov %sp, %g1 |
---|
96 | |
---|
97 | sub %sp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp |
---|
98 | ! make space for Stack_Frame||ISF |
---|
99 | |
---|
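
  /*
   * Note that, per the 64-bit SPARC ABI, %sp is biased: the actual save
   * area lives at %sp + STACK_BIAS (2047), which is why every ISF access
   * below adds STACK_BIAS. This restates the convention assumed by the
   * STACK_BIAS uses in this file; the constant itself comes from the
   * port/ABI headers.
   */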

  /* save tstate, tpc, tnpc, pil */
  stx %g4, [%sp + STACK_BIAS + ISF_TSTATE_OFFSET]
  rdpr %pil, %g3
  rdpr %tpc, %g4
  rdpr %tnpc, %g5
  stx %g3, [%sp + STACK_BIAS + ISF_PIL_OFFSET]
  stx %g4, [%sp + STACK_BIAS + ISF_TPC_OFFSET]
  stx %g5, [%sp + STACK_BIAS + ISF_TNPC_OFFSET]
  stx %g2, [%sp + STACK_BIAS + ISF_TVEC_OFFSET]

  rd %y, %g4                                      ! save y
  stx %g4, [%sp + STACK_BIAS + ISF_Y_OFFSET]

  ! save interrupted frame's output regs
  stx %o0, [%sp + STACK_BIAS + ISF_O0_OFFSET]     ! save o0
  stx %o1, [%sp + STACK_BIAS + ISF_O1_OFFSET]     ! save o1
  stx %o2, [%sp + STACK_BIAS + ISF_O2_OFFSET]     ! save o2
  stx %o3, [%sp + STACK_BIAS + ISF_O3_OFFSET]     ! save o3
  stx %o4, [%sp + STACK_BIAS + ISF_O4_OFFSET]     ! save o4
  stx %o5, [%sp + STACK_BIAS + ISF_O5_OFFSET]     ! save o5
  stx %g1, [%sp + STACK_BIAS + ISF_O6_SP_OFFSET]  ! save o6/sp
  stx %o7, [%sp + STACK_BIAS + ISF_O7_OFFSET]     ! save o7

  mov %g1, %o5      ! hold the old sp here for now
  mov %g2, %o1      ! we'll need trap # later

  /* switch to TL[0] */
  wrpr %g0, 0, %tl

  /* switch to normal globals */
#if defined (SUN4U)
  /* the assignment to pstate below will mask out the AG bit */
#elif defined (SUN4V)
  wrpr %g0, 0, %gl
#endif
  /* get pstate to known state */
  wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK, %pstate

  ! save globals
  stx %g1, [%sp + STACK_BIAS + ISF_G1_OFFSET]     ! save g1
  stx %g2, [%sp + STACK_BIAS + ISF_G2_OFFSET]     ! save g2
  stx %g3, [%sp + STACK_BIAS + ISF_G3_OFFSET]     ! save g3
  stx %g4, [%sp + STACK_BIAS + ISF_G4_OFFSET]     ! save g4
  stx %g5, [%sp + STACK_BIAS + ISF_G5_OFFSET]     ! save g5
  stx %g6, [%sp + STACK_BIAS + ISF_G6_OFFSET]     ! save g6
  stx %g7, [%sp + STACK_BIAS + ISF_G7_OFFSET]     ! save g7


  mov %o1, %g2      ! get the trap #
  mov %o5, %g7      ! store the interrupted %sp (preserve)
  mov %sp, %o1      ! 2nd arg to ISR Handler = address of ISF
  add %o1, STACK_BIAS, %o1  ! need to adjust for stack bias, 2nd arg = ISF

  /*
   * Increment ISR nest level and Thread dispatch disable level.
   *
   * Register usage for this section: (note, these are used later)
   *
   *   g3 = _Thread_Dispatch_disable_level pointer
   *   g5 = _Thread_Dispatch_disable_level value (uint32_t)
   *   g6 = _ISR_Nest_level pointer
   *   g4 = _ISR_Nest_level value (uint32_t)
   *   o5 = temp
   *
   * NOTE: It is assumed that g6 - g7 will be preserved until the ISR
   *       nest and thread dispatch disable levels are unnested.
   */
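
  /*
   * The setx instructions below are synthetic: the assembler expands each
   * one into a multi-instruction sequence (sethi/or/shift) that builds a
   * 64-bit constant, using the middle operand (%o5 here) as a temporary.
   * That is why %o5 is listed as "temp" above.
   */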

  setx THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3
  lduw [%g3], %g5
  setx ISR_NEST_LEVEL, %o5, %g6
  lduw [%g6], %g4

  add %g5, 1, %g5
  stuw %g5, [%g3]

  add %g4, 1, %g4
  stuw %g4, [%g6]

  /*
   * If ISR nest level was zero (now 1), then switch stack.
   */

  subcc %g4, 1, %g4           ! outermost interrupt handler?
  bnz dont_switch_stacks      ! No, then do not switch stacks

  setx SYM(INTERRUPT_STACK_HIGH), %o5, %g1
  ldx [%g1], %sp

  /*
   * Adjust the stack for the stack bias
   */
  sub %sp, STACK_BIAS, %sp

  /*
   * Make sure we have a place on the stack for the window overflow
   * trap handler to write into. At this point it is safe to
   * enable traps again.
   */

  sub %sp, SPARC64_MINIMUM_STACK_FRAME_SIZE, %sp

dont_switch_stacks:
  /*
   * Check if we have an external interrupt (trap 0x41 - 0x4f). If so,
   * set the PIL to mask off interrupts with lower priority.
   *
   * The original PIL is not modified since it will be restored
   * when the interrupt handler returns.
   */

  and %g2, 0x0ff, %g1   ! is bottom byte of vector number in [0x41,0x4f]?

  subcc %g1, 0x41, %g0
  bl dont_fix_pil
  subcc %g1, 0x4f, %g0
  bg dont_fix_pil
  nop
  wrpr %g0, %g1, %pil
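  /*
   * PIL is a 4-bit register, so the write above presumably relies on only
   * the low four bits of the vector number taking effect, mapping 0x41-0x4f
   * to interrupt levels 1-15. This is an inference about the intent of the
   * code, not something stated elsewhere in this file.
   */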

dont_fix_pil:
  /* We need to be careful about enabling traps here.
   *
   * We already stored off the tstate, tpc, and tnpc, and switched to
   * TL = 0, so it should be safe.
   */

  /* zero out g4 so that ofw calls work */
  mov %g0, %g4

  ! **** ENABLE TRAPS ****
  wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
    SPARC_PSTATE_IE_MASK, %pstate

  /*
   * Vector to user's handler.
   *
   * NOTE: TBR may no longer have vector number in it since
   *       we just enabled traps. It is definitely in g2.
   */
  setx SYM(_ISR_Vector_table), %o5, %g1
  and %g2, 0x1FF, %o5       ! remove synchronous trap indicator
  sll %o5, 3, %o5           ! o5 = offset into table
  ldx [%g1 + %o5], %g1      ! g1 = _ISR_Vector_table[ vector ]


  ! o1 = 2nd arg = address of the ISF
  !   WAS LOADED WHEN ISF WAS SAVED!!!
  mov %g2, %o0              ! o0 = 1st arg = vector number
  call %g1, 0
  nop                       ! delay slot
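
  /*
   * Given the argument setup above (vector number in %o0, ISF address in
   * %o1), the vector table entries are presumably C functions shaped
   * roughly like the sketch below; the parameter names and the exact ISF
   * type are illustrative, not taken from this file:
   *
   *   void handler( uint64_t vector, CPU_Interrupt_frame *isf );
   */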

  /*
   * Redisable traps so we can finish up the interrupt processing.
   * This is a conservative place to do this.
   */
  ! **** DISABLE TRAPS ****
  wrpr %g0, SPARC_PSTATE_PRIV_MASK, %pstate

  /*
   * We may safely use any of the %o and %g registers, because
   * we saved them earlier (and any other interrupt that uses
   * them will also save them). Right now, the state of those
   * registers is as follows:
   *   %o registers: unknown (user's handler may have destroyed)
   *   %g1,g4,g5: scratch
   *   %g2: unknown: was trap vector
   *   %g3: unknown: was _Thread_Dispatch_disable_level pointer
   *   %g6: _ISR_Nest_level
   *   %g7: interrupted task's sp
   */

  /*
   * Decrement ISR nest level and Thread dispatch disable level.
   *
   * Register usage for this section: (note: as used above)
   *
   *   g3 = _Thread_Dispatch_disable_level pointer
   *   g5 = _Thread_Dispatch_disable_level value
   *   g6 = _ISR_Nest_level pointer
   *   g4 = _ISR_Nest_level value
   *   o5 = temp
   */

  /* We have to re-load the values from memory, because there are
   * not enough registers that we know will be preserved across the
   * user's handler. If this is a problem, we can create a register
   * window for _ISR_Handler.
   */

  setx THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3
  lduw [%g3], %g5
  lduw [%g6], %g4
  sub %g5, 1, %g5
  stuw %g5, [%g3]
  sub %g4, 1, %g4
  stuw %g4, [%g6]

  orcc %g4, %g0, %g0          ! ISRs still nested?
  bnz dont_restore_stack      ! Yes, then don't restore the stack yet
  nop

  /*
   * This is the outermost interrupt handler. Need to get off the
   * CPU Interrupt Stack and back to the task's stack.
   *
   * The following subtract should get us back on the interrupted
   * task's stack and add enough room to invoke the dispatcher.
   * When we enable traps, we are mostly back in the context
   * of the task and subsequent interrupts can operate normally.
   *
   * Now %sp points to the bottom of the ISF.
   */

  sub %g7, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp

dont_restore_stack:

  /*
   * If dispatching is disabled (includes nested interrupt case),
   * then do a "simple" exit.
   */

  orcc %g5, %g0, %g0          ! Is dispatching disabled?
  bnz simple_return           ! Yes, then do a "simple" exit
                              ! NOTE: Use the delay slot
  mov %g0, %g4                ! clear g4 for ofw

  ! Are we dispatching from a previous ISR in the interrupted thread?
  setx SYM(_CPU_ISR_Dispatch_disable), %o5, %g5
  lduw [%g5], %o5
  orcc %o5, %g0, %g0          ! Is this thread already doing an ISR?
  bnz simple_return           ! Yes, then do a "simple" exit
  nop

  setx DISPATCH_NEEDED, %o5, %g7
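
  /*
   * DISPATCH_NEEDED is taken to be the per-CPU "dispatch needed" flag
   * exposed for assembly use (see the percpu.h include above); the ldub
   * below treats it as a single byte. This description is inferred from
   * how the symbol is used here rather than from its definition.
   */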

  /*
   * If a context switch is necessary, then fudge the stack to
   * return to the interrupt dispatcher.
   */

  ldub [%g7], %o5

  orcc %o5, %g0, %g0          ! Is thread switch necessary?
  bz simple_return            ! No, then do a simple return; otherwise fall through
  nop

  /*
   * Invoke interrupt dispatcher.
   */

  ! Set ISR dispatch nesting prevention flag
  mov 1, %o1
  setx SYM(_CPU_ISR_Dispatch_disable), %o5, %o2
  stuw %o1, [%o2]


  ! **** ENABLE TRAPS ****
  wrpr %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
    SPARC_PSTATE_IE_MASK, %pstate
isr_dispatch:
  call SYM(_Thread_Dispatch), 0
  nop

  /*
   * We invoked _Thread_Dispatch in a state similar to the interrupted
   * task. In order to safely be able to tinker with the register
   * windows and get the task back to its pre-interrupt state,
   * we need to disable interrupts.
   */
  mov 2, %g4      ! syscall (disable interrupts)
  ta 0            ! syscall (disable interrupts)
  mov 0, %g4
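
  /*
   * The "ta 0" sequences here and at dispatchAgain below appear to use a
   * port-defined software trap: trap 0 with a function code in %g4
   * (2 = disable interrupts, 3 = enable interrupts, judging from the
   * inline comments). The trap's implementation lives elsewhere in the
   * port.
   */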

  /*
   * While we had ISR dispatching disabled in this thread,
   * did we miss anything? If so, then we need to do another
   * _Thread_Dispatch before leaving this ISR Dispatch context.
   */

  setx DISPATCH_NEEDED, %o5, %o1
  ldub [%o1], %o2

  orcc %o2, %g0, %g0          ! Is thread switch necessary?
  bz allow_nest_again         ! No, then clear out and return
  nop

  ! Yes, then invoke the dispatcher
dispatchAgain:
  mov 3, %g4      ! syscall (enable interrupts)
  ta 0            ! syscall (enable interrupts)
  ba isr_dispatch
  mov 0, %g4

allow_nest_again:

  ! Zero out ISR dispatch nesting prevention flag
  setx SYM(_CPU_ISR_Dispatch_disable), %o5, %o1
  stuw %g0, [%o1]

  /*
   * The CWP in place at this point may be different from
   * that which was in effect at the beginning of the ISR if we
   * have been context switched between the beginning of this invocation
   * of _ISR_Handler and this point. Thus the CWP and WIM should
   * not be changed back to their values at ISR entry time. Any
   * changes to the PSR must preserve the CWP.
   */

simple_return:
  flushw      ! get register windows to a 'clean' state
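  /*
   * flushw forces spill traps for all register windows other than the
   * current one, so their contents end up in their save areas on the
   * stack and only the current window remains live in registers.
   */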

  ! **** DISABLE TRAPS ****
  wrpr %g0, SPARC_PSTATE_PRIV_MASK, %pstate

  ldx [%sp + STACK_BIAS + ISF_Y_OFFSET], %o1      ! restore y
  wr %o1, 0, %y

  ldx [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1

  ! see if cwp is proper (tstate.cwp == cwp)
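  ! (CWP occupies the low five bits of TSTATE, hence the 0x1F mask below)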
  and %g1, 0x1F, %g6
  rdpr %cwp, %g7
  cmp %g6, %g7
  bz good_window
  nop

  /*
   * Fix the CWP. The CWP needs to be the one that gets restored when
   * returning from the trap via retry/done. Do this before reloading
   * the task's output regs. Basically fake a window spill/fill.
   *
   * Is this necessary on sun4v? Why not just re-write
   * tstate.cwp to be equal to the current cwp?
   */
  mov %sp, %g1
  stx %l0, [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET]
  stx %l1, [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET]
  stx %l2, [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET]
  stx %l3, [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET]
  stx %l4, [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET]
  stx %l5, [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET]
  stx %l6, [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET]
  stx %l7, [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET]
  stx %i0, [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET]
  stx %i1, [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET]
  stx %i2, [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET]
  stx %i3, [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET]
  stx %i4, [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET]
  stx %i5, [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET]
  stx %i6, [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET]
  stx %i7, [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET]
  wrpr %g0, %g6, %cwp
  mov %g1, %sp
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET], %l0
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET], %l1
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET], %l2
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET], %l3
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET], %l4
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET], %l5
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET], %l6
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET], %l7
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET], %i0
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET], %i1
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET], %i2
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET], %i3
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET], %i4
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET], %i5
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
  ldx [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET], %i7


good_window:


  /*
   * Restore the task's global and out registers
   */

  ldx [%sp + STACK_BIAS + ISF_G1_OFFSET], %g1     ! restore g1
  ldx [%sp + STACK_BIAS + ISF_G2_OFFSET], %g2     ! restore g2
  ldx [%sp + STACK_BIAS + ISF_G3_OFFSET], %g3     ! restore g3
  ldx [%sp + STACK_BIAS + ISF_G4_OFFSET], %g4     ! restore g4
  ldx [%sp + STACK_BIAS + ISF_G5_OFFSET], %g5     ! restore g5
  ldx [%sp + STACK_BIAS + ISF_G6_OFFSET], %g6     ! restore g6
  ldx [%sp + STACK_BIAS + ISF_G7_OFFSET], %g7     ! restore g7

  ! Assume the interrupted context is in TL 0 with GL 0 / normal globals.
  ! When tstate is restored at done/retry, the interrupted context is restored.
  ! Return to TL[1], GL[1], and restore TSTATE, TPC, and TNPC
  wrpr %g0, 1, %tl

  ! Return to GL=1 or AG
#if defined(SUN4U)
  rdpr %pstate, %o1
  or %o1, SPARC_PSTATE_AG_MASK, %o1
  wrpr %o1, %g0, %pstate      ! go to AG.
#elif defined(SUN4V)
  wrpr %g0, 1, %gl
#endif

  ! now we can use global registers (at gl=1 or AG)
  ldx [%sp + STACK_BIAS + ISF_PIL_OFFSET], %g3
  ldx [%sp + STACK_BIAS + ISF_TPC_OFFSET], %g4
  ldx [%sp + STACK_BIAS + ISF_TNPC_OFFSET], %g5
  ldx [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1
  ldx [%sp + STACK_BIAS + ISF_TVEC_OFFSET], %g2
  wrpr %g0, %g3, %pil
  wrpr %g0, %g4, %tpc
  wrpr %g0, %g5, %tnpc

  wrpr %g0, %g1, %tstate

  ldx [%sp + STACK_BIAS + ISF_O0_OFFSET], %o0     ! restore o0
  ldx [%sp + STACK_BIAS + ISF_O1_OFFSET], %o1     ! restore o1
  ldx [%sp + STACK_BIAS + ISF_O2_OFFSET], %o2     ! restore o2
  ldx [%sp + STACK_BIAS + ISF_O3_OFFSET], %o3     ! restore o3
  ldx [%sp + STACK_BIAS + ISF_O4_OFFSET], %o4     ! restore o4
  ldx [%sp + STACK_BIAS + ISF_O5_OFFSET], %o5     ! restore o5
  ! sp is restored later
  ldx [%sp + STACK_BIAS + ISF_O7_OFFSET], %o7     ! restore o7

  ldx [%sp + STACK_BIAS + ISF_O6_SP_OFFSET], %o6  ! restore o6/sp

  /*
   * Determine whether to re-execute the trapping instruction
   * (asynchronous trap) or to skip the trapping instruction
   * (synchronous trap).
   */
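
  ! For reference: retry resumes execution at TPC (re-executing the
  ! instruction there), while done resumes at TNPC, skipping the
  ! instruction at TPC; both restore state from TSTATE and lower TL.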
533 | |
---|
534 | andcc %g2, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0 |
---|
535 | ! Is this a synchronous trap? |
---|
536 | be not_synch ! No, then skip trapping instruction |
---|
537 | mov 0, %g4 |
---|
538 | retry ! re-execute trapping instruction |
---|
539 | not_synch: |
---|
540 | done ! skip trapping instruction |
---|
541 | |
---|
542 | /* end of file */ |
---|