source: rtems/c/src/lib/libbsp/sparc/shared/irq_asm.S @ 97cf623d

Last change on this file since 97cf623d was 97cf623d, checked in by Sebastian Huber <sebastian.huber@…>, on 02/05/14 at 10:36:05

sparc: Save/restore only non-volatile context

The _CPU_Context_switch() is a normal function call. The following
registers are volatile (the caller must assume that the register
contents are destroyed by the callee) according to "SYSTEM V APPLICATION
BINARY INTERFACE - SPARC Processor Supplement", Third Edition: g1, o0,
o1, o2, o3, o4, o5. Drop these registers from the context.

Ensure that offset defines match the structure offsets.
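
As a rough illustration of the resulting layout (the authoritative definition is Context_Control in the SPARC cpu.h; the sketch below mirrors the save/restore sequence in this file and its field names are illustrative, not the actual code):

    #include <stdint.h>

    /* Sketch only: the real structure is Context_Control and the
     * *_OFFSET defines used below must match its layout exactly. */
    typedef struct {
      uint32_t g2, g3, g4, g5, g6, g7;            /* non-volatile globals */
      uint32_t l0, l1, l2, l3, l4, l5, l6, l7;    /* local registers */
      uint32_t i0, i1, i2, i3, i4, i5, i6_fp, i7; /* inputs; i6 is the FP */
      uint32_t o6_sp, o7;                         /* SP and return address */
      uint32_t psr;                               /* status register */
      uint32_t isr_dispatch_disable;              /* ISR dispatch flag */
    } Context_Control;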

/*  cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 */

#include <rtems/asm.h>
#include <rtems/score/percpu.h>
#include <bspopts.h>

.macro GET_SELF_CPU_CONTROL REG, TMP
        sethi    %hi(_Per_CPU_Information), \REG
        add      \REG, %lo(_Per_CPU_Information), \REG

#if defined( RTEMS_SMP )
#if BSP_LEON3_SMP
        /* LEON3 SMP support */
        rd       %asr17, \TMP
        srl      \TMP, 28, \TMP /* CPU number is upper 4 bits so shift */
#else
        mov      0, \TMP
#endif
        sll      \TMP, PER_CPU_CONTROL_SIZE_LOG2, \TMP
        add      \REG, \TMP, \REG
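        /*
         *  \REG now points at this CPU's entry in _Per_CPU_Information,
         *  i.e. base + (cpu_index << PER_CPU_CONTROL_SIZE_LOG2).
         *  Illustrative note: if PER_CPU_CONTROL_SIZE_LOG2 were 7, CPU 2
         *  would get an offset of 2 << 7 = 256 bytes; the actual size is
         *  derived from the Per_CPU_Control structure.
         */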
#endif /* defined( RTEMS_SMP ) */
.endm

/*
 *  void _CPU_Context_switch(
 *    Context_Control  *run,
 *    Context_Control  *heir
 *  )
 *
 *  This routine performs a normal non-FP context switch.
 */

        .align 4
        PUBLIC(_CPU_Context_switch)
SYM(_CPU_Context_switch):
        std     %g2, [%o0 + G2_OFFSET]       ! save the global registers
        std     %g4, [%o0 + G4_OFFSET]
        std     %g6, [%o0 + G6_OFFSET]

        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
        std     %l2, [%o0 + L2_OFFSET]
        std     %l4, [%o0 + L4_OFFSET]
        std     %l6, [%o0 + L6_OFFSET]

        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
        std     %i2, [%o0 + I2_OFFSET]
        std     %i4, [%o0 + I4_OFFSET]
        std     %i6, [%o0 + I6_FP_OFFSET]

        std     %o6, [%o0 + O6_SP_OFFSET]    ! save the output registers

        ! o3 = self per-CPU control
        GET_SELF_CPU_CONTROL %o3, %o4

        ! load the ISR stack nesting prevention flag
        ld      [%o3 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE], %o4
        ! save it a bit later so we do not waste a couple of cycles

        rd      %psr, %o2
        st      %o2, [%o0 + PSR_OFFSET]      ! save status register

        ! Now actually save ISR stack nesting prevention flag
        st       %o4, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]

        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
         *    o3 = self per-CPU control
         */

        PUBLIC(_CPU_Context_restore_heir)
SYM(_CPU_Context_restore_heir):
        /*
         *  Flush all windows with valid contents except the current one.
         *  In examining the set register windows, one may logically divide
         *  the windows into sets (some of which may be empty) based on their
         *  current status:
         *
         *    + current (i.e. in use),
         *    + used (i.e. a restore would not trap)
         *    + invalid (i.e. 1 in corresponding bit in WIM)
         *    + unused
         *
         *  Either the used or unused set of windows may be empty.
         *
         *  NOTE: We assume only one bit is set in the WIM at a time.
         *
         *  Given a CWP of 5 and a WIM of 0x1, the registers are divided
         *  into sets as follows:
         *
         *    + 0   - invalid
         *    + 1-4 - unused
         *    + 5   - current
         *    + 6-7 - used
         *
         *  In this case, we would only save the used windows -- 6 and 7.
         *
         *  Traps are disabled for the same logical period as in a
         *  flush all windows trap handler.
         *
         *  Register usage while saving the windows:
         *    g1 = current PSR
         *    g2 = current wim
         *    g3 = CWP
         *    g4 = wim scratch
         *    g5 = scratch
         */

        ld      [%o1 + PSR_OFFSET], %g1       ! g1 = saved psr

        and     %o2, SPARC_PSR_CWP_MASK, %g3  ! g3 = CWP
                                              ! g1 = psr w/o cwp
        andn    %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1
        or      %g1, %g3, %g1                 ! g1 = heir's psr
        mov     %g1, %psr                     ! restore status register and
                                              ! **** DISABLE TRAPS ****
        mov     %wim, %g2                     ! g2 = wim
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = WIM mask for CW invalid

save_frame_loop:
        sll     %g4, 1, %g5                   ! rotate the "wim" left 1
        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
        or      %g4, %g5, %g4                 ! g4 = wim if we do one restore
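        ! Example (8 register windows): g4 = 0x40 rotates to 0x80; g4 =
        ! 0x80 yields 0x101, whose stray bit 8 is harmless because only
        ! the low 8 bits are compared against the WIM below.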

        /*
         *  If a restore would underflow (i.e. land on the invalid
         *  window), then every used window has been flushed and we
         *  are done.
         */

        andcc   %g4, %g2, %g0                 ! would a restore underflow?
        bnz     done_flushing                 ! Yes, then stop flushing
        nop

        restore                               ! back one window

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        ba      save_frame_loop
        nop

done_flushing:

        add     %g3, 1, %g3                   ! calculate desired WIM
        and     %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = new WIM
        mov     %g4, %wim
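        ! Example (8 register windows): CWP = 5 gives new WIM = 1 << 6 =
        ! 0x40, so the window just above the current one becomes invalid.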

        or      %g1, SPARC_PSR_ET_MASK, %g1
        mov     %g1, %psr                     ! **** ENABLE TRAPS ****
                                              !   and restore CWP
        nop
        nop
        nop

        ldd     [%o1 + G2_OFFSET], %g2        ! restore the global registers
        ldd     [%o1 + G4_OFFSET], %g4
        ldd     [%o1 + G6_OFFSET], %g6

        ! Load thread specific ISR dispatch prevention flag
        ld      [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
        ! Store it to memory later to use the cycles

        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
        ldd     [%o1 + L2_OFFSET], %l2
        ldd     [%o1 + L4_OFFSET], %l4
        ldd     [%o1 + L6_OFFSET], %l6

        ! Now restore thread specific ISR dispatch prevention flag
        st      %o2, [%o3 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        ldd     [%o1 + I0_OFFSET], %i0        ! restore the input registers
        ldd     [%o1 + I2_OFFSET], %i2
        ldd     [%o1 + I4_OFFSET], %i4
        ldd     [%o1 + I6_FP_OFFSET], %i6

        ldd     [%o1 + O6_SP_OFFSET], %o6     ! restore the output registers

        jmp     %o7 + 8                       ! return
        nop                                   ! delay slot

/*
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  )
 *
 *  This routine is generally used only to perform restart self.
 *
 *  NOTE: It is unnecessary to reload some registers.
 */
        .align 4
        PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_restore):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
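        ! after the save, the new_context argument (%o0 in the caller's
        ! window) is visible here as %i0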
        rd      %psr, %o2
        GET_SELF_CPU_CONTROL %o3, %o4
        ba      SYM(_CPU_Context_restore_heir)
        mov     %i0, %o1                      ! in the delay slot

/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, trap type is between 0 and 255
 *        if it is an asynchronous trap and between 256 and 511 if it is
 *        synchronous.
 */

        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                      ! Is this a synchronous trap?
        be,a    win_ovflow            ! No, then skip the adjustment
        nop                           ! DELAY
        mov     %l1, %l6              ! save trapped pc for debug info
        mov     %l2, %l1              ! do not return to the instruction
        add     %l2, 4, %l2           ! indicated

win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                 ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5            ! g5 = wim >> cwp ; shift count and CWP
                                         !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                   ! Is this an invalid window?
        bne     dont_do_the_window       ! No, then skip all this stuff
        ! we are using the delay slot

        /*
         *  The following is the same as a 1 position right rotate of WIM.
         */

        srl     %g4, 1, %g5              ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
                                         ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
                                         !      (WIM << (Number Windows - 1))
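        ! Example (8 register windows): WIM = 0x01 rotates right to 0x80.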

        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved from %l4 and %l5
         *  directly into the ISF below.
         */

save_isf:

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                               ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        std     %g6, [%sp + ISF_G6_OFFSET]     ! save g6, g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc

        mov     %sp, %o1                       ! 2nd arg to ISR Handler

        /*
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l5 = per cpu info pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l5 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */

        GET_SELF_CPU_CONTROL %l5, %l7

        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
        ld       [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL], %l6

        add      %l7, 1, %l7
        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        add      %l6, 1, %l6
        st       %l6, [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

        /*
         *  If ISR nest level was zero (now 1), then switch stack.
         */

        mov      %sp, %fp
        subcc    %l7, 1, %l7             ! outermost interrupt handler?
        bnz      dont_switch_stacks      ! No, then do not switch stacks

        nop
        ld       [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
         *  set the PIL in the %psr to mask off interrupts with lower priority.
         *  The original %psr in %l0 is not modified since it will be restored
         *  when the interrupt handler returns.
         */
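        ! Example: trap type 0x19 (interrupt level 9) gives
        ! (0x19 << 8) & SPARC_PSR_PIL_MASK = 0x900, i.e. PIL = 9, so
        ! interrupt levels 1-9 remain masked while the handler runs.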

        mov      %l0, %g5
        and      %l3, 0x0ff, %g4

/* This is a fix for ERC32 with FPU rev.B or rev.C */

#if defined(FPU_REVB)


        subcc    %g4, 0x08, %g0
        be       fpu_revb
        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        or       %g4, %g5, %g5
        srl      %l0, 12, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        nop
        ba,a     enable_irq


fpu_revb:
        srl      %l0, 12, %g4   ! check if EF is set in %psr
        andcc    %g4, 1, %g0
        be       dont_fix_pil   ! if the FPU is disabled then continue as normal
        and      %l3, 0xff, %g4
        subcc    %g4, 0x08, %g0
        bne      enable_irq     ! if not an FPU exception then do two fmovs
        set      __sparc_fq, %g4
        st       %fsr, [%g4]    ! if the FQ is not empty and FQ[1] = fmovs
        ld       [%g4], %g4     ! then this is bug 3.14
        srl      %g4, 13, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        set      __sparc_fq, %g4
        std      %fq, [%g4]
        ld       [%g4+4], %g4
        set      0x81a00020, %g5
        subcc    %g4, %g5, %g0
        bne,a    dont_fix_pil2
        wr       %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        ba,a     simple_return

enable_irq:
        or       %g5, SPARC_PSR_PIL_MASK, %g4
        wr       %g4, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        nop; nop; nop
        fmovs    %f0, %f0
        ba       dont_fix_pil
        fmovs    %f0, %f0

        .data
        .global __sparc_fq
        .align 8
__sparc_fq:
        .word 0,0

        .text
/* end of ERC32 FPU rev.B/C fix */

#else

        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        ba       pil_fixed
        or       %g4, %g5, %g5
#endif

dont_fix_pil:
        or       %g5, SPARC_PSR_PIL_MASK, %g5
pil_fixed:
        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
dont_fix_pil2:

        /*
         *  Vector to user's handler.
         *
         *  NOTE: TBR may no longer have vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        ld       [%g4+%lo(SYM(_ISR_Vector_table))], %g4
        and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
        sll      %g5, 2, %g5            ! g5 = offset into table
        ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]


                                        ! o1 = 2nd arg = address of the ISF
                                        !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0               ! o0 = 1st arg = vector number
        call     %g4, 0
        nop                             ! delay slot

        /*
         *  Redisable traps so we can finish up the interrupt processing.
         *  This is a VERY conservative place to do this.
         *
         *  NOTE: %l0 has the PSR which was in place when we took the trap.
         */

        mov      %l0, %psr             ! **** DISABLE TRAPS ****
        nop; nop; nop

        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l5 = per cpu info pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         */

        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]
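        ! (%l7 already holds the decremented nest level computed by the
        ! subcc before dont_switch_stacks; per the NOTE above it was
        ! preserved across the user's handler)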

        sub      %l6, 1, %l6
        st       %l6, [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

        /*
         *  If dispatching is disabled (includes nested interrupt case),
         *  then do a "simple" exit.
         */

        orcc     %l6, %g0, %g0   ! Is dispatching disabled?
        bnz      simple_return   ! Yes, then do a "simple" exit
        nop

        ! Are we dispatching from a previous ISR in the interrupted thread?
        ld       [%l5 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE], %l7
        orcc     %l7, %g0, %g0   ! Is this thread already doing an ISR?
        bnz      simple_return   ! Yes, then do a "simple" exit
        nop


        /*
         *  If a context switch is necessary, then fudge the stack to
         *  return to the interrupt dispatcher.
         */

        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l6

        orcc     %l6, %g0, %g0   ! Is a thread switch necessary?
        bz       simple_return   ! No, then do a simple return
        nop

        /*
         *  Invoke interrupt dispatcher.
         */

        ! Set ISR dispatch nesting prevention flag
        mov      1,%l6
        st       %l6, [%l5 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        /*
         *  The following subtract should get us back on the interrupted
         *  task's stack and add enough room to invoke the dispatcher.
         *  When we enable traps, we are mostly back in the context
         *  of the task and subsequent interrupts can operate normally.
         */

        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
        mov     %l7, %psr                      !  **** ENABLE TRAPS ****
        nop
        nop
        nop
isr_dispatch:
        call    SYM(_Thread_Dispatch), 0
        nop

        /*
         *  We invoked _Thread_Dispatch in a state similar to the interrupted
         *  task.  In order to safely be able to tinker with the register
         *  windows and get the task back to its pre-interrupt state, we
         *  need to disable interrupts so that we can safely tinker with
         *  the register windowing.  In particular, the CWP in the PSR
         *  is fragile during this period. (See PR578.)
         */
        mov     2,%g1                           ! syscall (disable interrupts)
        ta      0                               ! syscall (disable interrupts)

        /*
         *  While we had ISR dispatching disabled in this thread,
         *  did we miss anything?  If so, then we need to do another
         *  _Thread_Dispatch before leaving this ISR dispatch context.
         */

        GET_SELF_CPU_CONTROL %l5, %l7

        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l7

        orcc     %l7, %g0, %g0    ! Is a thread switch necessary?
        bz       allow_nest_again ! No, then clear out and return
        nop

        ! Yes, then invoke the dispatcher
dispatchAgain:
        mov     3,%g1                           ! syscall (enable interrupts)
        ta      0                               ! syscall (enable interrupts)
        ba      isr_dispatch
        nop

allow_nest_again:

        ! Zero out ISR stack nesting prevention flag
        st       %g0, [%l5 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0

        /*
         *  Restore the task's global and out registers.
         */

        mov    %fp, %g1

                                              ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
        ldd     [%fp + ISF_G6_OFFSET], %g6    ! restore g6, g7

        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and are thus off limits.
         *
         *   The following is the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  If (CWP + 1) is an invalid window then we need to reload it.
         *
         *  WARNING: Traps should now be disabled
         */

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = wim >> (cwp + 1) ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is the task's window invalid?
        bne     good_task_window

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))
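        ! Example (8 register windows): WIM = 0x80 yields 0x101; the wr
        ! to %wim below keeps only the implemented window bits, i.e. 0x01.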

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the task's window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! reload of sp clobbers ISF
        save                               ! Back to ISR dispatch window

good_task_window:

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop; nop; nop
                                           !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        jmp     %l1                        ! transfer control and
        rett    %l2                        ! go back to task's window

/* end of file */