source: rtems/c/src/lib/libbsp/sparc/shared/irq_asm.S @ 88f6c4fc

Last change on this file since 88f6c4fc was 88f6c4fc, checked in by Sebastian Huber <sebastian.huber@…>, on 08/02/13 at 12:06:52

sparc: Move _CPU_Context_switch(), etc.

Move the _CPU_Context_switch(), _CPU_Context_restore() and
_CPU_Context_switch_to_first_task_smp() code since the method to obtain
the processor index is BSP specific.

  • Property mode set to 100644
File size: 26.5 KB
/*  cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 */

#include <rtems/asm.h>
#include <rtems/system.h>
#include <bspopts.h>

.macro GET_SELF_CPU_CONTROL REG, TMP
        sethi    %hi(_Per_CPU_Information), \REG
        add      \REG, %lo(_Per_CPU_Information), \REG

#if defined( RTEMS_SMP )
#if BSP_LEON3_SMP
        /* LEON3 SMP support */
        rd       %asr17, \TMP
        srl      \TMP, 28, \TMP /* CPU number is upper 4 bits so shift */
#else
        mov      0, \TMP
        nop
#endif
        sll      \TMP, PER_CPU_CONTROL_SIZE_LOG2, \TMP
        add      \REG, \TMP, \REG
#endif /* defined( RTEMS_SMP ) */
.endm
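
/*
 *  For clarity, the macro above computes the following, sketched here in
 *  C (an illustrative sketch only; cpu_index and the casts are
 *  assumptions, the symbol names are from the code above):
 *
 *    Per_CPU_Control *self = (Per_CPU_Control *)
 *      ((uintptr_t) &_Per_CPU_Information
 *        + ((uintptr_t) cpu_index << PER_CPU_CONTROL_SIZE_LOG2));
 *
 *  On LEON3 SMP the cpu_index comes from the upper four bits of %asr17;
 *  otherwise it is taken to be zero.
 */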

/*
 *  void _CPU_Context_switch(
 *    Context_Control  *run,
 *    Context_Control  *heir
 *  )
 *
 *  This routine performs a normal non-FP context switch.
 */

        .align 4
        PUBLIC(_CPU_Context_switch)
SYM(_CPU_Context_switch):
        ! skip g0
        st      %g1, [%o0 + G1_OFFSET]       ! save the global registers
        std     %g2, [%o0 + G2_OFFSET]
        std     %g4, [%o0 + G4_OFFSET]
        std     %g6, [%o0 + G6_OFFSET]

        ! load the address of the ISR stack nesting prevention flag
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %g2
        ld       [%g2 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %g2
        ! save it a bit later so we do not waste a couple of cycles

        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
        std     %l2, [%o0 + L2_OFFSET]
        std     %l4, [%o0 + L4_OFFSET]
        std     %l6, [%o0 + L6_OFFSET]

        ! now actually save the ISR stack nesting prevention flag
        st       %g2, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]

        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
        std     %i2, [%o0 + I2_OFFSET]
        std     %i4, [%o0 + I4_OFFSET]
        std     %i6, [%o0 + I6_FP_OFFSET]

        std     %o0, [%o0 + O0_OFFSET]       ! save the output registers
        std     %o2, [%o0 + O2_OFFSET]
        std     %o4, [%o0 + O4_OFFSET]
        std     %o6, [%o0 + O6_SP_OFFSET]

        rd      %psr, %o2
        st      %o2, [%o0 + PSR_OFFSET]      ! save status register

        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
         */

        PUBLIC(_CPU_Context_restore_heir)
SYM(_CPU_Context_restore_heir):
        /*
         *  Flush all windows with valid contents except the current one.
         *  In examining the set of register windows, one may logically divide
         *  the windows into sets (some of which may be empty) based on their
         *  current status:
         *
         *    + current (i.e. in use),
         *    + used (i.e. a restore would not trap)
         *    + invalid (i.e. 1 in corresponding bit in WIM)
         *    + unused
         *
         *  Either the used or unused set of windows may be empty.
         *
         *  NOTE: We assume only one bit is set in the WIM at a time.
         *
         *  Given a CWP of 5 and a WIM of 0x1, the registers are divided
         *  into sets as follows:
         *
         *    + 0   - invalid
         *    + 1-4 - unused
         *    + 5   - current
         *    + 6-7 - used
         *
         *  In this case, we would only save the used windows -- 6 and 7.
         *
         *  Traps are disabled for the same logical period as in a
         *  flush all windows trap handler.
         *
         *  Register usage while saving the windows:
         *    g1 = current PSR
         *    g2 = current wim
         *    g3 = CWP
         *    g4 = wim scratch
         *    g5 = scratch
         */

        ld      [%o1 + PSR_OFFSET], %g1       ! g1 = saved psr

        and     %o2, SPARC_PSR_CWP_MASK, %g3  ! g3 = CWP
                                              ! g1 = psr w/o cwp
        andn    %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1
        or      %g1, %g3, %g1                 ! g1 = heir's psr
        mov     %g1, %psr                     ! restore status register and
                                              ! **** DISABLE TRAPS ****
        mov     %wim, %g2                     ! g2 = wim
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = WIM mask for CW invalid

save_frame_loop:
        sll     %g4, 1, %g5                   ! rotate the "wim" left 1
        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
        or      %g4, %g5, %g4                 ! g4 = wim if we do one restore

        /*
         *  If a restore would not underflow, then continue.
         */

        andcc   %g4, %g2, %g0                 ! any more windows to flush?
        bnz     done_flushing                 ! no, then we are done flushing
        nop

        restore                               ! back one window

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        ba      save_frame_loop
        nop

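/*
 *  In rough C, the flush loop above behaves as follows (an illustrative
 *  sketch, not generated code; rotl rotates left within the number of
 *  register windows):
 *
 *    mask = 1 << CWP;
 *    while (((mask = rotl(mask, 1)) & WIM) == 0) {
 *      restore to the previous window and store its locals and
 *      ins to that window's stack frame, as an overflow trap would;
 *    }
 */
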
done_flushing:

        add     %g3, 1, %g3                   ! calculate desired WIM
        and     %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = new WIM
        mov     %g4, %wim

        or      %g1, SPARC_PSR_ET_MASK, %g1
        mov     %g1, %psr                     ! **** ENABLE TRAPS ****
                                              !   and restore CWP
        nop
        nop
        nop

        ! skip g0
        ld      [%o1 + G1_OFFSET], %g1        ! restore the global registers
        ldd     [%o1 + G2_OFFSET], %g2
        ldd     [%o1 + G4_OFFSET], %g4
        ldd     [%o1 + G6_OFFSET], %g6

        ! load the thread specific ISR dispatch prevention flag
        ld      [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %o3
        ! store it to memory later to use the cycles

        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
        ldd     [%o1 + L2_OFFSET], %l2
        ldd     [%o1 + L4_OFFSET], %l4
        ldd     [%o1 + L6_OFFSET], %l6

        ! now restore the thread specific ISR dispatch prevention flag
        st       %o2, [%o3 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        ldd     [%o1 + I0_OFFSET], %i0        ! restore the input registers
        ldd     [%o1 + I2_OFFSET], %i2
        ldd     [%o1 + I4_OFFSET], %i4
        ldd     [%o1 + I6_FP_OFFSET], %i6

        ldd     [%o1 + O2_OFFSET], %o2        ! restore the output registers
        ldd     [%o1 + O4_OFFSET], %o4
        ldd     [%o1 + O6_SP_OFFSET], %o6
        ! do o0/o1 last to avoid destroying the heir context pointer
        ldd     [%o1 + O0_OFFSET], %o0        ! overwrite heir pointer

        jmp     %o7 + 8                       ! return
        nop                                   ! delay slot

/*
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  )
 *
 *  This routine is generally used only to perform restart self.
 *
 *  NOTE: It is unnecessary to reload some registers.
 */
        .align 4
        PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_restore):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
        rd      %psr, %o2
        ba      SYM(_CPU_Context_restore_heir)
        mov     %i0, %o1                      ! in the delay slot
        .align 4

#if defined(RTEMS_SMP)
/*
 *  void _CPU_Context_switch_to_first_task_smp(
 *    Context_Control *new_context
 *  )
 *
 *  This routine is only used to switch to the first task on a
 *  secondary core in an SMP configuration.  We do not need to
 *  flush any windows and, in fact, this can be dangerous
 *  as they may or may not be initialized properly.  So we just
 *  reinitialize the PSR and WIM.
 */
        PUBLIC(_CPU_Context_switch_to_first_task_smp)
SYM(_CPU_Context_switch_to_first_task_smp):
        mov     %psr, %g1               ! turn off traps when modifying WIM
        andn    %g1, SPARC_PSR_ET_MASK, %g1
        mov     %g1, %psr
        /* WIM and PSR will be set in done_flushing; it needs these
         * arguments: g1 = PSR, g3 = CWP, o1 = context
         */
        and     %g1, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3
        nop
        mov     %o0, %o1                ! o1 = context to switch to
        ba,a    done_flushing
#endif

/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, the trap type is between 0 and
 *        255 if it is an asynchronous trap and between 256 and 511 if it is
 *        synchronous.
 */

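/*
 *  For reference, a trap table entry that vectors here looks roughly
 *  like the sketch below (an assumption based on the register
 *  conventions listed above; the hardware itself writes the PC and nPC
 *  of the trapped instruction into %l1 and %l2):
 *
 *    mov   %psr, %l0
 *    sethi %hi(SYM(_ISR_Handler)), %l4
 *    jmp   %l4 + %lo(SYM(_ISR_Handler))
 *    mov   TRAP_TYPE, %l3
 */
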
        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                      ! Is this a synchronous trap?
        be,a    win_ovflow            ! No, then skip the adjustment
        nop                           ! DELAY
        mov     %l1, %l6              ! save trapped pc for debug info
        mov     %l2, %l1              ! do not return to the instruction
        add     %l2, 4, %l2           ! indicated

win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                 ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5            ! g5 = wim >> cwp ; shift count and CWP
                                         !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                   ! Is this an invalid window?
        bne     dont_do_the_window       ! No, then skip all this stuff
        ! we are using the delay slot

        /*
         *  The following is the same as a 1 position right rotate of WIM.
         */

        srl     %g4, 1, %g5              ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
                                         ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
                                         !      (WIM << (Number Windows - 1))

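/*
 *  Worked example (eight register windows): WIM = 0x01 rotated right one
 *  position gives (0x01 >> 1) | (0x01 << 7) = 0x80.
 */
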
        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved from %l4 and %l5
         *  directly into the ISF below.
         */

save_isf:

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                               ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        std     %g6, [%sp + ISF_G6_OFFSET]     ! save g6, g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc

        mov     %sp, %o1                       ! 2nd arg to ISR Handler

        /*
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = per cpu info pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */

        GET_SELF_CPU_CONTROL %l5, %l7

        ld       [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL], %l6
        ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7

        add      %l6, 1, %l6
        st       %l6, [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

        add      %l7, 1, %l7
        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        /*
         *  If the ISR nest level was zero (now 1), then switch stacks.
         */

        mov      %sp, %fp
        subcc    %l7, 1, %l7             ! outermost interrupt handler?
        bnz      dont_switch_stacks      ! No, then do not switch stacks
        nop
        ld       [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f).  If so,
         *  set the PIL in the %psr to mask off interrupts with lower priority.
         *  The original %psr in %l0 is not modified since it will be restored
         *  when the interrupt handler returns.
         */
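
/*
 *  Illustrative sketch of the PIL adjustment below (the mask name is
 *  real, the C form is an assumption about intent): for a trap type tt
 *  in the range 0x11 - 0x1f,
 *
 *    new_psr = (old_psr & ~SPARC_PSR_PIL_MASK) | ((tt & 0xf) << 8);
 *
 *  e.g. tt = 0x19 (interrupt level 9) raises the PIL field to 9.
 */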

        mov      %l0, %g5
        and      %l3, 0x0ff, %g4

/* This is a fix for ERC32 with FPU rev.B or rev.C */

#if defined(FPU_REVB)


        subcc    %g4, 0x08, %g0
        be       fpu_revb
        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        or       %g4, %g5, %g5
        srl      %l0, 12, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        nop
        ba,a     enable_irq


fpu_revb:
        srl      %l0, 12, %g4   ! check if EF is set in %psr
        andcc    %g4, 1, %g0
        be       dont_fix_pil   ! if the FPU is disabled then continue as normal
        and      %l3, 0xff, %g4
        subcc    %g4, 0x08, %g0
        bne      enable_irq     ! if not an FPU exception then do two fmovs
        set      __sparc_fq, %g4
        st       %fsr, [%g4]    ! if the FQ is not empty and FQ[1] = fmovs
        ld       [%g4], %g4     ! then this is bug 3.14
        srl      %g4, 13, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        set      __sparc_fq, %g4
        std      %fq, [%g4]
        ld       [%g4+4], %g4
        set      0x81a00020, %g5
        subcc    %g4, %g5, %g0
        bne,a    dont_fix_pil2
        wr       %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        ba,a     simple_return

enable_irq:
        or       %g5, SPARC_PSR_PIL_MASK, %g4
        wr       %g4, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        nop; nop; nop
        fmovs    %f0, %f0
        ba       dont_fix_pil
        fmovs    %f0, %f0

        .data
        .global __sparc_fq
        .align 8
__sparc_fq:
        .word 0,0

        .text
/* end of ERC32 FPU rev.B/C fix */

#else

        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        ba       pil_fixed
        or       %g4, %g5, %g5
#endif

dont_fix_pil:
        or       %g5, SPARC_PSR_PIL_MASK, %g5
pil_fixed:
        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
dont_fix_pil2:

        /*
         *  Vector to user's handler.
         *
         *  NOTE: TBR may no longer have the vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        ld       [%g4+%lo(SYM(_ISR_Vector_table))], %g4
        and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
        sll      %g5, 2, %g5            ! g5 = offset into table
        ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]


                                        ! o1 = 2nd arg = address of the ISF
                                        !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0               ! o0 = 1st arg = vector number
        call     %g4, 0
        nop                             ! delay slot
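
/*
 *  In C terms, the dispatch above amounts to the following (a hedged
 *  sketch; the handler signature reflects the vector-number plus ISF
 *  pointer convention noted in the comments):
 *
 *    (*_ISR_Vector_table[ vector & 0xff ])( vector, isf );
 */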

        /*
         *  Re-disable traps so we can finish up the interrupt processing.
         *  This is a VERY conservative place to do this.
         *
         *  NOTE: %l0 has the PSR which was in place when we took the trap.
         */

        mov      %l0, %psr             ! **** DISABLE TRAPS ****
        nop; nop; nop

        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = per cpu info pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         */

        sub      %l6, 1, %l6
        st       %l6, [%l5 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

        st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]

        /*
         *  If dispatching is disabled (includes the nested interrupt case),
         *  then do a "simple" exit.
         */

        orcc     %l6, %g0, %g0   ! Is dispatching disabled?
        bnz      simple_return   ! Yes, then do a "simple" exit
        ! NOTE: Use the delay slot
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l6

        ! Are we dispatching from a previous ISR in the interrupted thread?
        ld       [%l6 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %l7
        orcc     %l7, %g0, %g0   ! Is this thread already doing an ISR?
        bnz      simple_return   ! Yes, then do a "simple" exit
        nop


        /*
         *  If a context switch is necessary, then fudge the stack to
         *  return to the interrupt dispatcher.
         */

        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l5

        orcc     %l5, %g0, %g0   ! Is a thread switch necessary?
        bz       simple_return   ! No, then do a simple return
        nop

        /*
         *  Invoke the interrupt dispatcher.
         */

        PUBLIC(_ISR_Dispatch)
SYM(_ISR_Dispatch):
        ! Set the ISR dispatch nesting prevention flag
        mov      1, %l6
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
        st       %l6, [%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        /*
         *  The following subtract should get us back on the interrupted
         *  task's stack and add enough room to invoke the dispatcher.
         *  When we enable traps, we are mostly back in the context
         *  of the task and subsequent interrupts can operate normally.
         */

        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
        mov     %l7, %psr                      !  **** ENABLE TRAPS ****
        nop
        nop
        nop
isr_dispatch:
        call    SYM(_Thread_Dispatch), 0
        nop

        /*
         *  We invoked _Thread_Dispatch in a state similar to the interrupted
         *  task.  In order to safely be able to tinker with the register
         *  windows and get the task back to its pre-interrupt state,
         *  we need interrupts disabled so we can safely tinker with
         *  the register windowing.  In particular, the CWP in the PSR
         *  is fragile during this period.  (See PR578.)
         */
        mov     2,%g1                           ! syscall (disable interrupts)
        ta      0                               ! syscall (disable interrupts)

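/*
 *  By this port's convention (see the comments on the instructions
 *  above), "ta 0" is the system call trap: %g1 == 2 requests interrupt
 *  disable and %g1 == 3 requests interrupt enable, which is why the
 *  same trap reappears in the dispatch loop below.
 */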

        /*
         *  While we had ISR dispatching disabled in this thread,
         *  did we miss anything?  If so, then we need to do another
         *  _Thread_Dispatch before leaving this ISR Dispatch context.
         */

        GET_SELF_CPU_CONTROL %l5, %l7

        ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l7

        orcc     %l7, %g0, %g0    ! Is a thread switch necessary?
        bz       allow_nest_again ! No, then clear out and return
        nop

        ! Yes, then invoke the dispatcher
dispatchAgain:
        mov     3,%g1                           ! syscall (enable interrupts)
        ta      0                               ! syscall (enable interrupts)
        ba      isr_dispatch
        nop

allow_nest_again:

        ! Zero out the ISR stack nesting prevention flag
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
        st       %g0, [%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0

        /*
         *  Restore the task's global and input registers.
         */

        mov    %fp, %g1

                                              ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
        ldd     [%fp + ISF_G6_OFFSET], %g6    ! restore g6, g7

        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and are thus off limits.
         *
         *   The following is the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  If (CWP + 1) is an invalid window, then we need to reload it.
         *
         *  WARNING: Traps should now be disabled.
         */

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = wim >> (cwp + 1) ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is the task's window invalid?
        bne     good_task_window

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))
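
/*
 *  Worked example (eight register windows): WIM = 0x80 rotated left one
 *  position gives ((0x80 << 1) | (0x80 >> 7)) & 0xff = 0x01.
 */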

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the task's window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! reload of sp clobbers ISF
        save                               ! back to the ISR dispatch window

good_task_window:

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop; nop; nop
                                           !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        jmp     %l1                        ! transfer control and
        rett    %l2                        ! go back to the task's window

/* end of file */