source: rtems/c/src/lib/libbsp/sparc/shared/irq_asm.S @ 3fe1e43

Last change on this file since 3fe1e43 was b2ec2d15, checked in by Sebastian Huber <sebastian.huber@…>, on 04/22/14 at 05:46:56

sparc: Optimize context switch

The registers g2 through g4 are reserved for applications. GCC uses
them as volatile registers by default. So they are treated like
volatile registers in RTEMS as well.

/*  cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 */

#include <rtems/asm.h>
#include <rtems/score/percpu.h>
#include <bspopts.h>

.macro GET_SELF_CPU_CONTROL REG, TMP
        sethi    %hi(_Per_CPU_Information), \REG
        add      \REG, %lo(_Per_CPU_Information), \REG

#if defined( RTEMS_SMP )
#if BSP_LEON3_SMP
        /* LEON3 SMP support */
        rd       %asr17, \TMP
        srl      \TMP, LEON3_ASR17_PROCESSOR_INDEX_SHIFT, \TMP
#else
        mov      0, \TMP
#endif
        sll      \TMP, PER_CPU_CONTROL_SIZE_LOG2, \TMP
        add      \REG, \TMP, \REG
#endif /* defined( RTEMS_SMP ) */
.endm
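
/*
 *  Illustrative sketch (not part of the original file): in C, the macro
 *  above amounts to the following, where cpu_index() is a stand-in name
 *  for reading the processor index (%asr17 on LEON3 SMP, 0 otherwise):
 *
 *      Per_CPU_Control *cpu_self = (Per_CPU_Control *)
 *        ( (char *) &_Per_CPU_Information
 *          + ( cpu_index() << PER_CPU_CONTROL_SIZE_LOG2 ) );
 */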

/*
 *  void _CPU_Context_switch(
 *    Context_Control  *run,
 *    Context_Control  *heir
 *  )
 *
 *  This routine performs a normal non-FP context switch.
 */
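
/*
 *  Illustrative note (not part of the original file): per the SPARC calling
 *  convention, run arrives in %o0 and heir in %o1.  The routine saves the
 *  running thread's state through %o0 and falls through into
 *  _CPU_Context_restore_heir, which consumes %o1, i.e. roughly:
 *
 *      save_state( run );                     // the stores below via %o0
 *      _CPU_Context_restore_heir( heir );     // save_state is a stand-in name
 */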

        .align 4
        PUBLIC(_CPU_Context_switch)
SYM(_CPU_Context_switch):
        st      %g5, [%o0 + G5_OFFSET]       ! save the global registers
        std     %g6, [%o0 + G6_OFFSET]

        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
        std     %l2, [%o0 + L2_OFFSET]
        std     %l4, [%o0 + L4_OFFSET]
        std     %l6, [%o0 + L6_OFFSET]

        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
        std     %i2, [%o0 + I2_OFFSET]
        std     %i4, [%o0 + I4_OFFSET]
        std     %i6, [%o0 + I6_FP_OFFSET]

        std     %o6, [%o0 + O6_SP_OFFSET]    ! save the output registers

        ! o3 = self per-CPU control
        GET_SELF_CPU_CONTROL %o3, %o4

        ! load the ISR stack nesting prevention flag
        ld      [%o3 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE], %o4
        ! save it a bit later so we do not waste a couple of cycles

        rd      %psr, %o2
        st      %o2, [%o0 + PSR_OFFSET]      ! save status register

        ! Now actually save ISR stack nesting prevention flag
        st       %o4, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]

        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
         *    o3 = self per-CPU control
         */

        PUBLIC(_CPU_Context_restore_heir)
SYM(_CPU_Context_restore_heir):
        /*
         *  Flush all windows with valid contents except the current one.
         *  In examining the set of register windows, one may logically divide
         *  the windows into sets (some of which may be empty) based on their
         *  current status:
         *
         *    + current (i.e. in use),
         *    + used (i.e. a restore would not trap)
         *    + invalid (i.e. 1 in corresponding bit in WIM)
         *    + unused
         *
         *  Either the used or unused set of windows may be empty.
         *
         *  NOTE: We assume only one bit is set in the WIM at a time.
         *
         *  Given a CWP of 5 and a WIM of 0x1, the registers are divided
         *  into sets as follows:
         *
         *    + 0   - invalid
         *    + 1-4 - unused
         *    + 5   - current
         *    + 6-7 - used
         *
         *  In this case, we would only save the used windows -- 6 and 7.
         *
         *  Traps are disabled for the same logical period as in a
         *  flush all windows trap handler.
         *
         *  Register usage while saving the windows:
         *    g1 = current PSR
         *    g2 = current wim
         *    g3 = CWP
         *    g4 = wim scratch
         *    g5 = scratch
         */

        ld      [%o1 + PSR_OFFSET], %g1       ! g1 = saved psr

        and     %o2, SPARC_PSR_CWP_MASK, %g3  ! g3 = CWP
                                              ! g1 = psr w/o cwp
        andn    %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1
        or      %g1, %g3, %g1                 ! g1 = heir's psr
        mov     %g1, %psr                     ! restore status register and
                                              ! **** DISABLE TRAPS ****
        mov     %wim, %g2                     ! g2 = wim
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = WIM mask for CW invalid

save_frame_loop:
        sll     %g4, 1, %g5                   ! rotate the "wim" left 1
        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
        or      %g4, %g5, %g4                 ! g4 = wim if we do one restore
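
        /*
         *  Illustrative sketch (not part of the original file): the three
         *  instructions above perform a 1-position left rotate of the mask
         *  within NWINDOWS bits (NWINDOWS = SPARC_NUMBER_OF_REGISTER_WINDOWS):
         *
         *      mask = ( mask << 1 ) | ( mask >> ( NWINDOWS - 1 ) );
         *
         *  Any spill into higher bits is harmless because only the low
         *  NWINDOWS bits are compared against the WIM below.
         */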

        /*
         *  If a restore would not underflow, then continue.
         */

        andcc   %g4, %g2, %g0                 ! Any windows to flush?
        bnz     done_flushing                 ! No, then continue
        nop

        restore                               ! back one window

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        ba      save_frame_loop
        nop

done_flushing:

        add     %g3, 1, %g3                   ! calculate desired WIM
        and     %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = new WIM
        mov     %g4, %wim
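
        /*
         *  Illustrative sketch (not part of the original file): the new WIM
         *  invalidates the window just "above" the heir's current one:
         *
         *      wim = 1 << ( ( cwp + 1 ) % NWINDOWS );
         *
         *  The and with (SPARC_NUMBER_OF_REGISTER_WINDOWS - 1) is the modulo,
         *  which assumes the window count is a power of two.
         */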

        or      %g1, SPARC_PSR_ET_MASK, %g1
        mov     %g1, %psr                     ! **** ENABLE TRAPS ****
                                              !   and restore CWP
        nop
        nop
        nop

        ld      [%o1 + G5_OFFSET], %g5        ! restore the global registers
        ldd     [%o1 + G6_OFFSET], %g6

        ! Load thread specific ISR dispatch prevention flag
        ld      [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
        ! Store it to memory later to use the cycles

        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
        ldd     [%o1 + L2_OFFSET], %l2
        ldd     [%o1 + L4_OFFSET], %l4
        ldd     [%o1 + L6_OFFSET], %l6

        ! Now restore thread specific ISR dispatch prevention flag
        st      %o2, [%o3 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        ldd     [%o1 + I0_OFFSET], %i0        ! restore the input registers
        ldd     [%o1 + I2_OFFSET], %i2
        ldd     [%o1 + I4_OFFSET], %i4
        ldd     [%o1 + I6_FP_OFFSET], %i6

        ldd     [%o1 + O6_SP_OFFSET], %o6     ! restore the output registers

        jmp     %o7 + 8                       ! return
        nop                                   ! delay slot

/*
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  )
 *
 *  This routine is generally used only to perform a restart of self.
 *
 *  NOTE: It is unnecessary to reload some registers.
 */
        .align 4
        PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_restore):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
        rd      %psr, %o2
        GET_SELF_CPU_CONTROL %o3, %o4
        ba      SYM(_CPU_Context_restore_heir)
        mov     %i0, %o1                      ! in the delay slot

/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, the trap type is between 0 and
 *        255 if it is an asynchronous trap and between 256 and 511 if it is
 *        synchronous.
 */

        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                      ! Is this a synchronous trap?
        be,a    win_ovflow            ! No, then skip the adjustment
        nop                           ! DELAY
        mov     %l1, %l6              ! save trapped pc for debug info
        mov     %l2, %l1              ! do not return to the instruction
        add     %l2, 4, %l2           ! indicated
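
        /*
         *  Illustrative sketch (not part of the original file): the
         *  adjustment above is, in C,
         *
         *      if ( tt & SPARC_SYNCHRONOUS_TRAP_BIT_MASK ) {
         *        tpc = pc;        // keep trapped pc for the ISF debug slot
         *        pc  = npc;       // do not re-execute the trapping instruction
         *        npc = npc + 4;
         *      }
         */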

win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                 ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5            ! g5 = win >> cwp ; shift count and CWP
                                         !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                   ! Is this an invalid window?
        bne     dont_do_the_window       ! No, then skip all this stuff
        ! we are using the delay slot

        /*
         *  The following is the same as a 1 position right rotate of WIM.
         */

        srl     %g4, 1, %g5              ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
                                         ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
                                         !      (WIM << (Number Windows - 1))
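
        /*
         *  Illustrative sketch (not part of the original file): in C, the
         *  rotate above is
         *
         *      wim = ( wim >> 1 ) | ( wim << ( NWINDOWS - 1 ) );
         *
         *  Only the low NWINDOWS bits are significant when the result is
         *  written to %wim below.
         */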

        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved from %l4 and %l5
         *  directly into the ISF below.
         */

save_isf:

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                               ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        std     %g6, [%sp + ISF_G6_OFFSET]     ! save g6, g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc

        mov     %sp, %o1                       ! 2nd arg to ISR Handler

        /*
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l5 = per cpu info pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l5 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */

        GET_SELF_CPU_CONTROL %l5, %l7

        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
        ld       [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL], %l6

        add      %l7, 1, %l7
        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        add      %l6, 1, %l6
        st       %l6, [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
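
        /*
         *  Illustrative sketch (not part of the original file): in C these
         *  updates are roughly
         *
         *      cpu_self->isr_nest_level++;
         *      cpu_self->thread_dispatch_disable_level++;
         *
         *  The field names are illustrative; the per-CPU offsets above are
         *  the authority.
         */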

        /*
         *  If ISR nest level was zero (now 1), then switch stack.
         */

        mov      %sp, %fp
        subcc    %l7, 1, %l7             ! outermost interrupt handler?
        bnz      dont_switch_stacks      ! No, then do not switch stacks

#if defined( RTEMS_PROFILING )
         sethi   %hi(SYM(_SPARC_Counter)), %o5
        ld       [%o5 + %lo(SYM(_SPARC_Counter))], %l4
        ld       [%l4], %o5
#else
         nop
#endif

        ld       [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f).  If so,
         *  set the PIL in the %psr to mask off interrupts with lower priority.
         *  The original %psr in %l0 is not modified since it will be restored
         *  when the interrupt handler returns.
         */

        mov      %l0, %g5
        and      %l3, 0x0ff, %g4
        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        ba       pil_fixed
        or       %g4, %g5, %g5
dont_fix_pil:
        or       %g5, SPARC_PSR_PIL_MASK, %g5
pil_fixed:
        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
dont_fix_pil2:
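
        /*
         *  Illustrative sketch (not part of the original file): the PIL
         *  handling above corresponds roughly to
         *
         *      tt = trap_type & 0xff;
         *      if ( 0x11 <= tt && tt <= 0x1f )    // external interrupt
         *        new_psr = ( old_psr & ~SPARC_PSR_PIL_MASK )
         *                | ( ( tt << 8 ) & SPARC_PSR_PIL_MASK );
         *      else
         *        new_psr = old_psr | SPARC_PSR_PIL_MASK;  // mask all levels
         *
         *  The wr then xors in SPARC_PSR_ET_MASK; since ET is 0 while traps
         *  are disabled, the write also sets ET = 1.
         */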

        /*
         *  Vector to user's handler.
         *
         *  NOTE: TBR may no longer have vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        ld       [%g4+%lo(SYM(_ISR_Vector_table))], %g4
        and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
        sll      %g5, 2, %g5            ! g5 = offset into table
        ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]
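
        /*
         *  Illustrative sketch (not part of the original file): together
         *  with the call below, this performs
         *
         *      handler = _ISR_Vector_table[ vector & 0xff ];
         *      ( *handler )( vector, isf );   // isf address staged in %o1
         *
         *  The table pointer is first loaded from the _ISR_Vector_table
         *  symbol, then indexed by the vector number scaled to word size.
         */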


                                        ! o1 = 2nd arg = address of the ISF
                                        !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0               ! o0 = 1st arg = vector number
        call     %g4, 0
#if defined( RTEMS_PROFILING )
         mov     %o5, %l3               ! save interrupt entry instant
        cmp      %l7, 0
        bne      profiling_not_outer_most_exit
         nop
        call     SYM(sparc_disable_interrupts), 0
         nop
        ld       [%l4], %o2             ! o2 = 3rd arg = interrupt exit instant
        mov      %l3, %o1               ! o1 = 2nd arg = interrupt entry instant
        call     SYM(_Profiling_Outer_most_interrupt_entry_and_exit), 0
         mov     %l5, %o0               ! o0 = 1st arg = per-CPU control
profiling_not_outer_most_exit:
#else
         nop                            ! delay slot
#endif

        /*
         *  Redisable traps so we can finish up the interrupt processing.
         *  This is a VERY conservative place to do this.
         *
         *  NOTE: %l0 has the PSR which was in place when we took the trap.
         */

        mov      %l0, %psr             ! **** DISABLE TRAPS ****
        nop; nop; nop

        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l5 = per cpu info pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         */

        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        sub      %l6, 1, %l6
        st       %l6, [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

        /*
         *  If dispatching is disabled (includes nested interrupt case),
         *  then do a "simple" exit.
         */

        orcc     %l6, %g0, %g0   ! Is dispatching disabled?
        bnz      simple_return   ! Yes, then do a "simple" exit
        nop

        ! Are we dispatching from a previous ISR in the interrupted thread?
        ld       [%l5 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE], %l7
        orcc     %l7, %g0, %g0   ! Is this thread already doing an ISR?
        bnz      simple_return   ! Yes, then do a "simple" exit
        nop

        /*
         *  If a context switch is necessary, then fudge the stack to
         *  return to the interrupt dispatcher.
         */

        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l6

        orcc     %l6, %g0, %g0   ! Is a thread switch necessary?
        bz       simple_return   ! No, then do a simple return
        nop
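
        /*
         *  Illustrative sketch (not part of the original file): the three
         *  tests above amount to
         *
         *      if ( cpu_self->thread_dispatch_disable_level != 0
         *          || cpu_self->isr_dispatch_disable
         *          || !cpu_self->dispatch_needed )
         *        goto simple_return;
         *
         *  using illustrative field names for the values accessed via the
         *  per-CPU offsets above.
         */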

        /*
         *  Invoke interrupt dispatcher.
         */

        ! Set ISR dispatch nesting prevention flag
        mov      1, %l6
        st       %l6, [%l5 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        /*
         *  The following subtract should get us back on the interrupted
         *  task's stack and add enough room to invoke the dispatcher.
         *  When we enable traps, we are mostly back in the context
         *  of the task and subsequent interrupts can operate normally.
         */

        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
        mov     %l7, %psr                      !  **** ENABLE TRAPS ****
        nop
        nop
        nop
isr_dispatch:
        call    SYM(_Thread_Dispatch), 0
        nop

        /*
         *  We invoked _Thread_Dispatch in a state similar to the interrupted
         *  task.  In order to safely be able to tinker with the register
         *  windows and get the task back to its pre-interrupt state, we
         *  need interrupts disabled.  In particular, the CWP in the PSR
         *  is fragile during this period.  (See PR578.)
         */
        mov     2, %g1                          ! syscall (disable interrupts)
        ta      0                               ! syscall (disable interrupts)

        /*
         *  While we had ISR dispatching disabled in this thread,
         *  did we miss anything?  If so, then we need to do another
         *  _Thread_Dispatch before leaving this ISR dispatch context.
         */

        GET_SELF_CPU_CONTROL %l5, %l7

        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l7

        orcc     %l7, %g0, %g0    ! Is a thread switch necessary?
        bz       allow_nest_again ! No, then clear out and return
        nop

        ! Yes, then invoke the dispatcher again
dispatchAgain:
        mov     3, %g1                          ! syscall (enable interrupts)
        ta      0                               ! syscall (enable interrupts)
        ba      isr_dispatch
        nop
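
        /*
         *  Illustrative sketch (not part of the original file): the retry
         *  logic around isr_dispatch is effectively
         *
         *      do {
         *        _Thread_Dispatch();            // traps currently enabled
         *        sparc_disable_interrupts();    // ta 0 with g1 == 2
         *        if ( !cpu_self->dispatch_needed )
         *          break;
         *        sparc_enable_interrupts();     // ta 0 with g1 == 3
         *      } while ( 1 );
         *      cpu_self->isr_dispatch_disable = 0;
         */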

allow_nest_again:

        ! Zero out ISR stack nesting prevention flag
        st       %g0, [%l5 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0

        /*
         *  Restore the task's global and out registers
         */

        mov    %fp, %g1

                                              ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
        ldd     [%fp + ISF_G6_OFFSET], %g6    ! restore g6, g7

        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and are thus off limits.
         *
         *   The following is the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  If (CWP + 1) is an invalid window then we need to reload it.
         *
         *  WARNING: Traps should now be disabled
         */

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = win >> cwp + 1 ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is the task's window invalid?
        bne     good_task_window

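        /*
         *  Illustrative sketch (not part of the original file): the test
         *  above is, in C,
         *
         *      if ( ( wim >> ( ( cwp + 1 ) % NWINDOWS ) ) == 1 )
         *        reload_task_window();    // reload_task_window is a stand-in
         *
         *  It relies on the WIM having exactly one bit set, so the shifted
         *  value equals 1 exactly when window (cwp + 1) is the invalid one.
         */
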
        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the task's window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! reload of sp clobbers ISF
        save                               ! Back to ISR dispatch window

good_task_window:

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop; nop; nop
                                           !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        jmp     %l1                        ! transfer control and
        rett    %l2                        ! go back to the task's window

/* end of file */