source: rtems/c/src/lib/libbsp/sparc/shared/irq_asm.S @ 47a61aa1

Last change on this file since 47a61aa1 was 47a61aa1, checked in by Joel Sherrill <joel.sherrill@…>, on 10/07/11 at 14:35:03

2011-10-07 Daniel Hellstrom <daniel@…>

PR 1933/cpukit

  • shared/irq_asm.S: From code inspection I have found the following issues (mostly SMP related) and some improvements in irq_asm.S. I would need a long test with interrupts to verify the interrupt handler more thoroughly; however, I cannot see that these patches hurt. Please see the comment per hunk below. One should go through the file to indent delay slots correctly; I have fixed some in the patch areas. An extra space is added in front of delay slots to indicate a delay slot.
  • Property mode set to 100644
File size: 20.2 KB
/*  cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 *
 *  $Id$
 */

#include <rtems/asm.h>
#include <rtems/system.h>
#include <bspopts.h>

/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, trap type is between 0 and 255
 *        if it is an asynchronous trap and between 256 and 511 if it is
 *        synchronous.
 */
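
/*
 *  For illustration only -- a sketch of the trap table convention assumed
 *  above, not code from this file.  Each RTEMS SPARC trap table entry is a
 *  4 instruction stub roughly of the form:
 *
 *      mov   %psr, %l0                  ! l0 = PSR at trap time
 *      sethi %hi(_ISR_Handler), %l4
 *      jmp   %l4 + %lo(_ISR_Handler)
 *       mov  TRAP_TYPE, %l3             ! delay slot: l3 = trap type
 *
 *  The hardware supplies l1 = PC and l2 = nPC when the trap is taken.
 */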

        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                      ! Is this a synchronous trap?
        be,a    win_ovflow            ! No, then skip the adjustment
         nop                          ! DELAY
        mov     %l1, %l6              ! save trapped pc for debug info
        mov     %l2, %l1              ! do not return to the instruction
        add     %l2, 4, %l2           ! indicated

win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                 ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5            ! g5 = win >> cwp ; shift count and CWP
                                         !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                   ! Is this an invalid window?
        bne     dont_do_the_window       ! No, then skip all this stuff
        ! we are using the delay slot
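        ! Illustrative C equivalent of the test above:
        !   if (((wim >> (psr & 0x1f)) & 1) != 1) goto dont_do_the_window;
        ! The srl may shift by the whole PSR because SPARC shifts use only
        ! the low 5 bits of the count, which happen to be the CWP field.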

        /*
         *  The following is the same as a 1 position right rotate of WIM
         */

        srl     %g4, 1, %g5              ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
                                         ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
                                         !      (WIM << (Number Windows - 1))
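        ! Worked example (assuming 8 register windows): WIM = 0b00000100
        ! rotates right to 0b00000010, while WIM = 0b00000001 wraps
        ! around to 0b10000000.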

        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved from %l4 and %l5
         *  directly into the ISF below.
         */

save_isf:

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                               ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        std     %g6, [%sp + ISF_G6_OFFSET]     ! save g6, g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc

        mov     %sp, %o1                       ! 2nd arg to ISR Handler

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f).  If so,
         *  set the PIL in the %psr to mask off interrupts with lower priority.
         *  The original %psr in %l0 is not modified since it will be restored
         *  when the interrupt handler returns.
         */
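        ! Illustrative C equivalent (the PIL field is bits 11:8 of the PSR);
        ! for a trap type tt in [0x11, 0x1f] the interrupt level is tt - 0x10:
        !   new_psr = (psr & ~SPARC_PSR_PIL_MASK) |
        !             ((tt << 8) & SPARC_PSR_PIL_MASK);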

        mov      %l0, %g5
        and      %l3, 0x0ff, %g4

/* This is a fix for ERC32 with FPU rev.B or rev.C */

#if defined(FPU_REVB)


        subcc    %g4, 0x08, %g0
        be       fpu_revb
        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        or       %g4, %g5, %g5
        srl      %l0, 12, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        nop
        ba,a     enable_irq


fpu_revb:
        srl      %l0, 12, %g4   ! check if EF is set in %psr
        andcc    %g4, 1, %g0
        be       dont_fix_pil   ! if the FPU is disabled then continue as normal
        and      %l3, 0xff, %g4
        subcc    %g4, 0x08, %g0
        bne      enable_irq     ! if not an FPU exception then do two fmovs
        set      __sparc_fq, %g4
        st       %fsr, [%g4]    ! if FQ is not empty and FQ[1] = fmovs
        ld       [%g4], %g4     ! then this is bug 3.14
        srl      %g4, 13, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        set      __sparc_fq, %g4
        std      %fq, [%g4]
        ld       [%g4+4], %g4
        set      0x81a00020, %g5
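                                ! 0x81a00020 is the instruction encoding
                                ! of "fmovs %f0, %f0"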
        subcc    %g4, %g5, %g0
        bne,a    dont_fix_pil2
        wr       %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        ba,a     simple_return

enable_irq:
        or       %g5, SPARC_PSR_PIL_MASK, %g4
        wr       %g4, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        nop; nop; nop
        fmovs    %f0, %f0
        ba       dont_fix_pil
        fmovs    %f0, %f0

        .data
        .global __sparc_fq
        .align 8
__sparc_fq:
        .word 0,0

        .text
/* end of ERC32 FPU rev.B/C fix */

#else

        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        ba       pil_fixed
        or       %g4, %g5, %g5
#endif

dont_fix_pil:
        or       %g5, SPARC_PSR_PIL_MASK, %g5
pil_fixed:
        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
dont_fix_pil2:

        PUBLIC(_ISR_PER_CPU)
SYM(_ISR_PER_CPU):

#if defined(RTEMS_SMP)
        sethi    %hi(_Per_CPU_Information_p), %l5
        add      %l5, %lo(_Per_CPU_Information_p), %l5
    #if BSP_LEON3_SMP
        /* LEON3 SMP support */
        rd      %asr17, %l7
        srl     %l7, 28, %l7    /* CPU number is in the upper 4 bits, so shift */
        sll     %l7, 2, %l7     /* l7 = offset */
        add     %l5, %l7, %l5
    #endif
        ld       [%l5], %l5     /* l5 = pointer to per CPU */

        /*
         *  On a multi-core system, we need to use SMP safe versions
         *  of the ISR and Thread Dispatch critical sections.
         *
         *  _ISR_SMP_Enter returns the interrupt nest level.  If we are the
         *  outermost interrupt, then we need to switch stacks.
         */
        call    SYM(_ISR_SMP_Enter), 0
         mov      %sp, %fp              ! delay slot
        cmp     %o0, 0
#else
        /*
         *  On a single core system, we can directly use variables.
         *
         *  Increment the ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */
        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6

        sethi    %hi(_Per_CPU_Information), %l5
        add      %l5, %lo(_Per_CPU_Information), %l5

        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7

        add      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        add      %l7, 1, %l7
        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        /*
         *  If the ISR nest level was zero (now 1), then switch stacks.
         */
        mov      %sp, %fp
        subcc    %l7, 1, %l7             ! outermost interrupt handler?
#endif

        /*
         *  Do we need to switch to the interrupt stack?  The annulled delay
         *  slot below is executed only when the branch is taken, i.e. only
         *  for the outermost interrupt, so only then is %sp switched to the
         *  interrupt stack.
         */
        beq,a    dont_switch_stacks      ! branch falls through to the label
         ld      [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp ! DELAY (annulled if nested)

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
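                                        ! (this minimum frame is typically 96
                                        !  bytes on SPARC: the 16 word window
                                        !  save area plus the ABI hidden and
                                        !  outgoing argument slots)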

        /*
         *  Vector to user's handler.
         *
         *  NOTE: TBR may no longer have the vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        ld       [%g4+%lo(SYM(_ISR_Vector_table))], %g4
        and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
        sll      %g5, 2, %g5            ! g5 = offset into table
        ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]


                                        ! o1 = 2nd arg = address of the ISF
                                        !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0               ! o0 = 1st arg = vector number
        call     %g4, 0
         nop                            ! delay slot
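
        ! Illustrative C equivalent of the lookup and call above:
        !   (*_ISR_Vector_table[ vector & 0xff ])( vector, isf );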

#if defined(RTEMS_SMP)
        call    SYM(_ISR_SMP_Exit), 0
         nop                            ! delay slot
        cmp     %o0, 0
        bz      simple_return
         nop
#else
        !sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
        !ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
        !ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
#endif

        /*
         *  Redisable traps so we can finish up the interrupt processing.
         *  This is a VERY conservative place to do this.
         *
         *  NOTE: %l0 has the PSR which was in place when we took the trap.
         */

        mov      %l0, %psr             ! **** DISABLE TRAPS ****
        nop; nop; nop

#if !defined(RTEMS_SMP)
        /*
         *  Decrement the ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         */

        sub      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        /*
         *  If dispatching is disabled (which includes the nested interrupt
         *  case), then do a "simple" exit.
         */

        orcc     %l6, %g0, %g0   ! Is dispatching disabled?
        bnz      simple_return   ! Yes, then do a "simple" exit
        ! NOTE: Use the delay slot
         sethi   %hi(SYM(_CPU_ISR_Dispatch_disable)), %l6

        ! Are we dispatching from a previous ISR in the interrupted thread?
        ld       [%l6 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %l7
        orcc     %l7, %g0, %g0   ! Is this thread already doing an ISR?
        bnz      simple_return   ! Yes, then do a "simple" exit
         nop

        /*
         *  If a context switch is necessary, then fudge the stack to
         *  return to the interrupt dispatcher.
         */

        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l5
        orcc     %l5, %g0, %g0   ! Is a thread switch necessary?
        bz       simple_return   ! No, then return
         nop
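
        ! To summarize (illustrative): a "simple" exit without dispatching
        ! is taken when _Thread_Dispatch_disable_level != 0, when
        ! _CPU_ISR_Dispatch_disable != 0, or when no dispatch is needed.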
#endif
        /*
         *  Invoke the interrupt dispatcher.
         */

        PUBLIC(_ISR_Dispatch)
SYM(_ISR_Dispatch):
        ! Set the ISR dispatch nesting prevention flag
        mov      1, %l6
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
        st       %l6, [%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        /*
         *  The following subtract should get us back on the interrupted
         *  task's stack and add enough room to invoke the dispatcher.
         *  When we enable traps, we are mostly back in the context
         *  of the task and subsequent interrupts can operate normally.
         */

        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
        mov     %l7, %psr                      !  **** ENABLE TRAPS ****
        nop
        nop
        nop
isr_dispatch:
        call    SYM(_Thread_Dispatch), 0
         nop                            ! delay slot

        /*
         *  We invoked _Thread_Dispatch in a state similar to the interrupted
         *  task.  In order to safely tinker with the register windows and
         *  get the task back to its pre-interrupt state, we need interrupts
         *  disabled.  In particular, the CWP in the PSR is fragile during
         *  this period.  (See PR578.)
         */
        mov     2, %g1                          ! syscall (disable interrupts)
        ta      0                               ! syscall (disable interrupts)

        /*
         *  While we had ISR dispatching disabled in this thread,
         *  did we miss anything?  If so, then we need to do another
         *  _Thread_Dispatch before leaving this ISR Dispatch context.
         */

#if defined(RTEMS_SMP)
        sethi    %hi(_Per_CPU_Information_p), %l5
        ld       [%l5 + %lo(_Per_CPU_Information_p)], %l5
    #if BSP_LEON3_SMP
        /* LEON3 SMP support */
        rd      %asr17, %l7
        srl     %l7, 28, %l7    /* CPU number is in the upper 4 bits, so shift */
        sll     %l7, 2, %l7     /* l7 = offset */
        add     %l5, %l7, %l5
    #else
        nop
        nop
    #endif
        ld       [%l5], %l5     /* l5 = pointer to per CPU */
#else
        sethi    %hi(_Per_CPU_Information), %l5
        add      %l5, %lo(_Per_CPU_Information), %l5
#endif
        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l5
        orcc     %l5, %g0, %g0   ! Is a thread switch necessary?
        bz       allow_nest_again
         nop

        ! Yes, then invoke the dispatcher again
dispatchAgain:
        mov     3, %g1                          ! syscall (enable interrupts)
        ta      0                               ! syscall (enable interrupts)
        ba      isr_dispatch
         nop

allow_nest_again:

        ! Zero out the ISR dispatch nesting prevention flag
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
        st       %g0, [%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0
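                                               ! clearing ET here means the
                                               ! writes of this PSR below keep
                                               ! traps disabled; the final
                                               ! rett sets ET = 1 again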

        /*
         *  Restore the task's global and out registers
         */

        mov    %fp, %g1

                                              ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
        ldd     [%fp + ISF_G6_OFFSET], %g6    ! restore g6, g7

        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and thus are off limits.
         *
         *   The following is the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  If (CWP + 1) is an invalid window, then we need to reload it.
         *
         *  WARNING: Traps should now be disabled
         */

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = win >> (cwp + 1) ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is the task's window invalid?
        bne     good_task_window

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the task's window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! a reload of sp would clobber the ISF
        save                               ! Back to the ISR dispatch window

good_task_window:

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop; nop; nop
                                           !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        jmp     %l1                        ! transfer control and
         rett   %l2                        ! go back to the task's window
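
        ! The jmp/rett pair above returns to the interrupted context: jmp
        ! supplies the new PC (%l1) and rett, executing in its delay slot,
        ! supplies the nPC (%l2), increments the CWP back into the task's
        ! window, and sets ET = 1 to re-enable traps.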

/* end of file */