source: rtems/cpukit/score/cpu/sparc/cpu_asm.S @ 882b1875

Last change on this file since 882b1875 was 0067feb, checked in by Joel Sherrill <joel.sherrill@…>, on 03/12/09 at 14:16:50

2009-03-12 Joel Sherrill <joel.sherrill@…>

PR 1385/cpukit

  • cpu_asm.S: When the type rtems_boolean was switched to the C99 bool, the size changed from 4 bytes to 1 byte. The interrupt dispatching code accesses two boolean variables for scheduling purposes and the assembly implementations of this code did not get updated.
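    The practical effect of that size change is visible below: the two flags are now loaded with ldub (load unsigned byte) instead of ld (load word). A minimal C sketch of why a word load is wrong for a one-byte bool (the layout and names here are illustrative, not from the change itself):

        #include <stdbool.h>
        #include <stdint.h>
        #include <string.h>

        struct layout {
          bool    flag;         /* 1 byte once rtems_boolean became C99 bool */
          uint8_t neighbor[3];  /* unrelated bytes that follow it in memory  */
        };

        /* What a 32-bit 'ld' effectively reads: the flag AND its neighbors,
           so a false flag next to a nonzero byte tests as "true". */
        static uint32_t load_word(const struct layout *l) {
          uint32_t v;
          memcpy(&v, l, sizeof(v));
          return v;
        }

        /* What the corrected 'ldub' reads: only the flag byte. */
        static uint32_t load_byte(const struct layout *l) {
          return (uint32_t) l->flag;
        }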
/*  cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2007.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 *
 *  $Id$
 */

#include <rtems/asm.h>

#if (SPARC_HAS_FPU == 1)

/*
 *  void _CPU_Context_save_fp(
 *    void **fp_context_ptr
 *  )
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to the FP context area
 *  is changed, then this routine updates it.
 *
 *  NOTE: See the README in this directory for information on the
 *        management of the "EF" bit in the PSR.
 */

        .align 4
        PUBLIC(_CPU_Context_save_fp)
SYM(_CPU_Context_save_fp):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  The following enables the floating point unit.
         */

        mov     %psr, %l0
        sethi   %hi(SPARC_PSR_EF_MASK), %l1
        or      %l1, %lo(SPARC_PSR_EF_MASK), %l1
        or      %l0, %l1, %l0
        mov     %l0, %psr                  ! **** ENABLE FLOAT ACCESS ****
        nop; nop; nop;                     ! Need three nops before EF is
        ld      [%i0], %l0                 ! active due to pipeline delay!!!
        std     %f0, [%l0 + FO_F1_OFFSET]
        std     %f2, [%l0 + F2_F3_OFFSET]
        std     %f4, [%l0 + F4_F5_OFFSET]
        std     %f6, [%l0 + F6_F7_OFFSET]
        std     %f8, [%l0 + F8_F9_OFFSET]
        std     %f10, [%l0 + F1O_F11_OFFSET]
        std     %f12, [%l0 + F12_F13_OFFSET]
        std     %f14, [%l0 + F14_F15_OFFSET]
        std     %f16, [%l0 + F16_F17_OFFSET]
        std     %f18, [%l0 + F18_F19_OFFSET]
        std     %f20, [%l0 + F2O_F21_OFFSET]
        std     %f22, [%l0 + F22_F23_OFFSET]
        std     %f24, [%l0 + F24_F25_OFFSET]
        std     %f26, [%l0 + F26_F27_OFFSET]
        std     %f28, [%l0 + F28_F29_OFFSET]
        std     %f30, [%l0 + F3O_F31_OFFSET]
        st      %fsr, [%l0 + FSR_OFFSET]
        ret
        restore

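/*
 *  NOTE (illustration, not part of the original algorithm): the five
 *  instructions above that set the EF bit amount to the following C
 *  sketch, where psr_read()/psr_write() are hypothetical accessors
 *  for the %psr register:
 *
 *      uint32_t psr = psr_read();      // mov %psr, %l0
 *      psr |= SPARC_PSR_EF_MASK;       // sethi/or to build mask, or to set
 *      psr_write(psr);                 // mov %l0, %psr plus three nops
 */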
/*
 *  void _CPU_Context_restore_fp(
 *    void **fp_context_ptr
 *  )
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to the FP context area
 *  is changed, then this routine updates it.
 *
 *  NOTE: See the README in this directory for information on the
 *        management of the "EF" bit in the PSR.
 */

        .align 4
        PUBLIC(_CPU_Context_restore_fp)
SYM(_CPU_Context_restore_fp):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  The following enables the floating point unit.
         */

        mov     %psr, %l0
        sethi   %hi(SPARC_PSR_EF_MASK), %l1
        or      %l1, %lo(SPARC_PSR_EF_MASK), %l1
        or      %l0, %l1, %l0
        mov     %l0, %psr                  ! **** ENABLE FLOAT ACCESS ****
        nop; nop; nop;                     ! Need three nops before EF is
        ld      [%i0], %l0                 ! active due to pipeline delay!!!
        ldd     [%l0 + FO_F1_OFFSET], %f0
        ldd     [%l0 + F2_F3_OFFSET], %f2
        ldd     [%l0 + F4_F5_OFFSET], %f4
        ldd     [%l0 + F6_F7_OFFSET], %f6
        ldd     [%l0 + F8_F9_OFFSET], %f8
        ldd     [%l0 + F1O_F11_OFFSET], %f10
        ldd     [%l0 + F12_F13_OFFSET], %f12
        ldd     [%l0 + F14_F15_OFFSET], %f14
        ldd     [%l0 + F16_F17_OFFSET], %f16
        ldd     [%l0 + F18_F19_OFFSET], %f18
        ldd     [%l0 + F2O_F21_OFFSET], %f20
        ldd     [%l0 + F22_F23_OFFSET], %f22
        ldd     [%l0 + F24_F25_OFFSET], %f24
        ldd     [%l0 + F26_F27_OFFSET], %f26
        ldd     [%l0 + F28_F29_OFFSET], %f28
        ldd     [%l0 + F3O_F31_OFFSET], %f30
        ld      [%l0 + FSR_OFFSET], %fsr
        ret
        restore

#endif /* SPARC_HAS_FPU */

/*
 *  void _CPU_Context_switch(
 *    Context_Control  *run,
 *    Context_Control  *heir
 *  )
 *
 *  This routine performs a normal non-FP context switch.
 */

        .align 4
        PUBLIC(_CPU_Context_switch)
SYM(_CPU_Context_switch):
        ! skip g0
        st      %g1, [%o0 + G1_OFFSET]       ! save the global registers
        std     %g2, [%o0 + G2_OFFSET]
        std     %g4, [%o0 + G4_OFFSET]
        std     %g6, [%o0 + G6_OFFSET]

        ! load the address of the ISR stack nesting prevention flag
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %g2
        ld       [%g2 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %g2
        ! save it a bit later so we do not waste a couple of cycles

        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
        std     %l2, [%o0 + L2_OFFSET]
        std     %l4, [%o0 + L4_OFFSET]
        std     %l6, [%o0 + L6_OFFSET]

        ! Now actually save the ISR stack nesting prevention flag
        st       %g2, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]

        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
        std     %i2, [%o0 + I2_OFFSET]
        std     %i4, [%o0 + I4_OFFSET]
        std     %i6, [%o0 + I6_FP_OFFSET]

        std     %o0, [%o0 + O0_OFFSET]       ! save the output registers
        std     %o2, [%o0 + O2_OFFSET]
        std     %o4, [%o0 + O4_OFFSET]
        std     %o6, [%o0 + O6_SP_OFFSET]

        rd      %psr, %o2
        st      %o2, [%o0 + PSR_OFFSET]      ! save status register

        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
         */

        PUBLIC(_CPU_Context_restore_heir)
SYM(_CPU_Context_restore_heir):
        /*
         *  Flush all windows with valid contents except the current one.
         *  In examining the set register windows, one may logically divide
         *  the windows into sets (some of which may be empty) based on their
         *  current status:
         *
         *    + current (i.e. in use),
         *    + used (i.e. a restore would not trap)
         *    + invalid (i.e. 1 in corresponding bit in WIM)
         *    + unused
         *
         *  Either the used or unused set of windows may be empty.
         *
         *  NOTE: We assume only one bit is set in the WIM at a time.
         *
         *  Given a CWP of 5 and a WIM of 0x1, the registers are divided
         *  into sets as follows:
         *
         *    + 0   - invalid
         *    + 1-4 - unused
         *    + 5   - current
         *    + 6-7 - used
         *
         *  In this case, we would only save the used windows -- 6 and 7.
         *
         *  Traps are disabled for the same logical period as in a
         *  flush all windows trap handler.
         *
         *  Register usage while saving the windows:
         *    g1 = current PSR
         *    g2 = current wim
         *    g3 = CWP
         *    g4 = wim scratch
         *    g5 = scratch
         */

        ld      [%o1 + PSR_OFFSET], %g1       ! g1 = saved psr

        and     %o2, SPARC_PSR_CWP_MASK, %g3  ! g3 = CWP
                                              ! g1 = psr w/o cwp
        andn    %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1
        or      %g1, %g3, %g1                 ! g1 = heir's psr
        mov     %g1, %psr                     ! restore status register and
                                              ! **** DISABLE TRAPS ****
        mov     %wim, %g2                     ! g2 = wim
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = WIM mask for CW invalid

save_frame_loop:
        sll     %g4, 1, %g5                   ! rotate the "wim" left 1
        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
        or      %g4, %g5, %g4                 ! g4 = wim if we do one restore

        /*
         *  If a restore would not underflow, then continue.
         */

        andcc   %g4, %g2, %g0                 ! Any more windows to flush?
        bnz     done_flushing                 ! No, then we are done
        nop

        restore                               ! back one window

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        ba      save_frame_loop
        nop

done_flushing:

        add     %g3, 1, %g3                   ! calculate desired WIM
        and     %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = new WIM
        mov     %g4, %wim

        or      %g1, SPARC_PSR_ET_MASK, %g1
        mov     %g1, %psr                     ! **** ENABLE TRAPS ****
                                              !   and restore CWP
        nop
        nop
        nop

        ! skip g0
        ld      [%o1 + G1_OFFSET], %g1        ! restore the global registers
        ldd     [%o1 + G2_OFFSET], %g2
        ldd     [%o1 + G4_OFFSET], %g4
        ldd     [%o1 + G6_OFFSET], %g6

        ! Load the thread-specific ISR dispatch prevention flag
        ld      [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %o3
        ! Store it to memory later to use the cycles

        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
        ldd     [%o1 + L2_OFFSET], %l2
        ldd     [%o1 + L4_OFFSET], %l4
        ldd     [%o1 + L6_OFFSET], %l6

        ! Now restore the thread-specific ISR dispatch prevention flag
        st       %o2, [%o3 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        ldd     [%o1 + I0_OFFSET], %i0        ! restore the input registers
        ldd     [%o1 + I2_OFFSET], %i2
        ldd     [%o1 + I4_OFFSET], %i4
        ldd     [%o1 + I6_FP_OFFSET], %i6

        ldd     [%o1 + O2_OFFSET], %o2        ! restore the output registers
        ldd     [%o1 + O4_OFFSET], %o4
        ldd     [%o1 + O6_SP_OFFSET], %o6
        ! do o0/o1 last to avoid destroying the heir context pointer
        ldd     [%o1 + O0_OFFSET], %o0        ! overwrite heir pointer

        jmp     %o7 + 8                       ! return
        nop                                   ! delay slot

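/*
 *  NOTE (illustration): the window-set taxonomy described above can be
 *  restated as a C sketch.  The helper below is hypothetical -- it only
 *  encodes the classification rule: walk away from CWP in the "restore"
 *  direction (CWP + 1, modulo the window count); windows seen before
 *  reaching the WIM bit are used, the WIM window is invalid, the rest
 *  are unused.
 *
 *      #include <stdint.h>
 *      enum win_state { WIN_CURRENT, WIN_INVALID, WIN_USED, WIN_UNUSED };
 *
 *      static enum win_state classify(
 *        unsigned w, unsigned cwp, uint32_t wim, unsigned nwindows)
 *      {
 *        if ( w == cwp )         return WIN_CURRENT;
 *        if ( wim & (1u << w) )  return WIN_INVALID;
 *        for ( unsigned v = (cwp + 1) % nwindows;
 *              v != cwp;
 *              v = (v + 1) % nwindows ) {
 *          if ( wim & (1u << v) ) break;   // hit the invalid window first
 *          if ( v == w )          return WIN_USED;
 *        }
 *        return WIN_UNUSED;
 *      }
 *
 *  With cwp = 5, wim = 0x1, nwindows = 8 this reproduces the example
 *  above: window 0 invalid, windows 1-4 unused, 5 current, 6-7 used.
 */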
/*
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  )
 *
 *  This routine is generally used only to perform restart self.
 *
 *  NOTE: It is unnecessary to reload some registers.
 */

        .align 4
        PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_restore):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
        rd      %psr, %o2
        ba      SYM(_CPU_Context_restore_heir)
        mov     %i0, %o1                      ! in the delay slot

/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, the trap type is between 0
 *        and 255 if it is an asynchronous trap and between 256 and 511
 *        if it is synchronous.
 */

        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                      ! Is this a synchronous trap?
        be,a    win_ovflow            ! No, then skip the adjustment
        nop                           ! DELAY
        mov     %l1, %l6              ! save trapped pc for debug info
        mov     %l2, %l1              ! do not return to the instruction
        add     %l2, 4, %l2           ! indicated

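/*
 *  NOTE (illustration): in C terms the adjustment above is, with pc, npc
 *  and tpc standing for %l1, %l2 and %l6:
 *
 *      if ( trap_type & SPARC_SYNCHRONOUS_TRAP_BIT_MASK ) {
 *        tpc = pc;        // remember the trapped PC for debug info
 *        pc  = npc;       // resume after the trapping instruction,
 *        npc = npc + 4;   //   not at it
 *      }
 */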
win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                 ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5            ! g5 = win >> cwp ; shift count and CWP
                                         !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                   ! Is this an invalid window?
        bne     dont_do_the_window       ! No, then skip all this stuff
        ! we are using the delay slot

        /*
         *  The following is the same as a 1 position right rotate of WIM.
         */

        srl     %g4, 1, %g5              ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
                                         ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
                                         !      (WIM << (Number Windows - 1))

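/*
 *  NOTE (illustration): both this right rotate and the left rotate used
 *  when reloading the task's window near the end of this file fit one C
 *  sketch (a hypothetical helper; assumes nwindows < 32):
 *
 *      #include <stdint.h>
 *      static uint32_t wim_rotate_right(uint32_t wim, unsigned nwindows)
 *      {
 *        uint32_t all = (1u << nwindows) - 1;   // the valid WIM bits
 *        return ((wim >> 1) | (wim << (nwindows - 1))) & all;
 *      }
 *
 *  The right rotate marks the window we are about to save into as usable;
 *  the left rotate is its inverse.
 */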
        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved from %l4 and %l5
         *  directly into the ISF below.
         */

save_isf:

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                               ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        std     %g6, [%sp + ISF_G6_OFFSET]     ! save g6, g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc

        mov     %sp, %o1                       ! 2nd arg to ISR Handler

        /*
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */

        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
        sethi    %hi(SYM(_ISR_Nest_level)), %l5
        ld       [%l5 + %lo(SYM(_ISR_Nest_level))], %l7

        add      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        add      %l7, 1, %l7
        st       %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]

        /*
         *  If the ISR nest level was zero (now 1), then switch stack.
         */

        mov      %sp, %fp
        subcc    %l7, 1, %l7             ! outermost interrupt handler?
        bnz      dont_switch_stacks      ! No, then do not switch stacks

        ! the sethi fills the branch delay slot; the ld executes only
        ! when this is the outermost interrupt
        sethi    %hi(SYM(_CPU_Interrupt_stack_high)), %g4
        ld       [%g4 + %lo(SYM(_CPU_Interrupt_stack_high))], %sp

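/*
 *  NOTE (illustration): the bookkeeping above is, in C terms,
 *
 *      ++_Thread_Dispatch_disable_level;    // no dispatch inside the ISR
 *      if ( ++_ISR_Nest_level == 1 )        // outermost interrupt?
 *        sp = _CPU_Interrupt_stack_high;    //   then move to the ISR stack
 *
 *  where sp stands for the %sp register.  Nested interrupts keep running
 *  on the interrupt stack they already occupy.
 */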
dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
         *  set the PIL in the %psr to mask off interrupts with the same or
         *  lower priority.  The original %psr in %l0 is not modified since
         *  it will be restored when the interrupt handler returns.
         */

        mov      %l0, %g5
        and      %l3, 0x0ff, %g4

/* This is a fix for ERC32 with FPU rev.B or rev.C */

#if defined(FPU_REVB)


        subcc    %g4, 0x08, %g0
        be       fpu_revb
        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        or       %g4, %g5, %g5
        srl      %l0, 12, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        nop
        ba,a     enable_irq


fpu_revb:
        srl      %l0, 12, %g4   ! check if EF is set in %psr
        andcc    %g4, 1, %g0
        be       dont_fix_pil   ! if the FPU is disabled, then continue as normal
        and      %l3, 0xff, %g4
        subcc    %g4, 0x08, %g0
        bne      enable_irq     ! if not an FPU exception, then do two fmovs
        set      __sparc_fq, %g4
        st       %fsr, [%g4]    ! if the FQ is not empty and FQ[1] = fmovs,
        ld       [%g4], %g4     ! then this is bug 3.14
        srl      %g4, 13, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        set      __sparc_fq, %g4
        std      %fq, [%g4]
        ld       [%g4+4], %g4
        set      0x81a00020, %g5
        subcc    %g4, %g5, %g0
        bne,a    dont_fix_pil2
        wr       %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        ba,a     simple_return

enable_irq:
        or       %g5, SPARC_PSR_PIL_MASK, %g4
        wr       %g4, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        nop; nop; nop
        fmovs    %f0, %f0
        ba       dont_fix_pil
        fmovs    %f0, %f0

        .data
        .global __sparc_fq
        .align 8
__sparc_fq:
        .word 0,0

        .text
/* end of ERC32 FPU rev.B/C fix */

#else

        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        ba       pil_fixed
        or       %g4, %g5, %g5
#endif

dont_fix_pil:
        or       %g5, SPARC_PSR_PIL_MASK, %g5
pil_fixed:
        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
dont_fix_pil2:

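/*
 *  NOTE (illustration): ignoring the FPU_REVB workaround, the PIL logic
 *  above computes the following (a C sketch; psr is %l0, trap is %l3):
 *
 *      uint32_t vec = trap & 0xff;
 *      uint32_t psr_for_handler = psr;
 *      if ( vec >= 0x11 && vec <= 0x1f ) {            // external interrupt
 *        uint32_t pil = (vec << 8) & SPARC_PSR_PIL_MASK;
 *        psr_for_handler = (psr & ~SPARC_PSR_PIL_MASK) | pil;
 *      } else {
 *        psr_for_handler |= SPARC_PSR_PIL_MASK;       // mask all maskable
 *      }
 *      // The wr ..., SPARC_PSR_ET_MASK, %psr XORs the mask in; ET was 0
 *      // at trap entry, so this writes psr_for_handler with traps enabled.
 */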
        /*
         *  Vector to user's handler.
         *
         *  NOTE: TBR may no longer have the vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        ld       [%g4+%lo(SYM(_ISR_Vector_table))], %g4
        and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
        sll      %g5, 2, %g5            ! g5 = offset into table
        ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]


                                        ! o1 = 2nd arg = address of the ISF
                                        !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0               ! o0 = 1st arg = vector number
        call     %g4, 0
        nop                             ! delay slot

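/*
 *  NOTE (illustration): in C terms the fetch-and-call above is
 *
 *      (*_ISR_Vector_table[ trap & 0xff ])( trap, isf );
 *
 *  with _ISR_Vector_table a pointer to the table of handlers (hence the
 *  two ld instructions), trap the full trap type from %l3 (still carrying
 *  the synchronous indicator), and isf the ISF address placed in %o1 when
 *  the ISF was saved.
 */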
        /*
         *  Re-disable traps so we can finish up the interrupt processing.
         *  This is a VERY conservative place to do this.
         *
         *  NOTE: %l0 has the PSR which was in place when we took the trap.
         */

        mov      %l0, %psr             ! **** DISABLE TRAPS ****
        nop; nop; nop

        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         */

        sub      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        ! l7 still holds the decremented nest level from the subcc in the
        ! stack switch test above
        st       %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]

        /*
         *  If dispatching is disabled (includes nested interrupt case),
         *  then do a "simple" exit.
         */

        orcc     %l6, %g0, %g0   ! Is dispatching disabled?
        bnz      simple_return   ! Yes, then do a "simple" exit
        ! NOTE: Use the delay slot
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l6

        ! Are we dispatching from a previous ISR in the interrupted thread?
        ld       [%l6 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %l7
        orcc     %l7, %g0, %g0   ! Is this thread already doing an ISR?
        bnz      simple_return   ! Yes, then do a "simple" exit
        ! NOTE: Use the delay slot
        sethi    %hi(SYM(_Context_Switch_necessary)), %l4


        /*
         *  If a context switch is necessary, then fudge the stack to
         *  return to the interrupt dispatcher.
         */

        ldub     [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5

        orcc     %l5, %g0, %g0   ! Is a thread switch necessary?
        bnz      SYM(_ISR_Dispatch) ! Yes, then invoke the dispatcher
        ! NOTE: Use the delay slot
        sethi    %hi(SYM(_ISR_Signals_to_thread_executing)), %l6

        /*
         *  Finally, check to see if signals were sent to the currently
         *  executing task.  If so, we need to invoke the interrupt dispatcher.
         */

        ldub     [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7

        orcc     %l7, %g0, %g0   ! Were signals sent to the currently
                                 !   executing thread?
        bz       simple_return   ! No, then do a "simple" exit
                                 ! use the delay slot to clear the signals
                                 !   to the currently executing task flag
        st       %g0, [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))]

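/*
 *  NOTE (illustration): the branch chain above implements this decision,
 *  sketched as a C helper.  The names are the globals used in this file;
 *  their exact declarations are assumed here.  The two per-event flags
 *  are one-byte bools, which is why they are loaded with ldub:
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *      extern volatile uint32_t _Thread_Dispatch_disable_level;
 *      extern volatile uint32_t _CPU_ISR_Dispatch_disable;
 *      extern volatile bool     _Context_Switch_necessary;
 *      extern volatile bool     _ISR_Signals_to_thread_executing;
 *
 *      static bool must_dispatch(void)
 *      {
 *        if ( _Thread_Dispatch_disable_level != 0 ) return false;
 *        if ( _CPU_ISR_Dispatch_disable )           return false;
 *        if ( _Context_Switch_necessary )           return true;
 *        bool signals = _ISR_Signals_to_thread_executing;
 *        _ISR_Signals_to_thread_executing = false;  // cleared in delay slot
 *        return signals;
 *      }
 *
 *  A true result falls into _ISR_Dispatch below; false takes simple_return.
 */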

        /*
         *  Invoke interrupt dispatcher.
         */

        PUBLIC(_ISR_Dispatch)
SYM(_ISR_Dispatch):
        ! Set the ISR dispatch nesting prevention flag
        mov      1, %l6
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
        st       %l6, [%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        /*
         *  The following subtract should get us back on the interrupted
         *  task's stack and add enough room to invoke the dispatcher.
         *  When we enable traps, we are mostly back in the context
         *  of the task and subsequent interrupts can operate normally.
         */

        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
        mov     %l7, %psr                      !  **** ENABLE TRAPS ****
        nop
        nop
        nop
isr_dispatch:
        call    SYM(_Thread_Dispatch), 0
        nop

        /*
         *  We invoked _Thread_Dispatch in a state similar to the interrupted
         *  task.  In order to safely be able to tinker with the register
         *  windows and get the task back to its pre-interrupt state,
         *  we need interrupts disabled.  In particular, the CWP in the PSR
         *  is fragile during this period.  (See PR578.)
         */
        mov     2, %g1                          ! syscall (disable interrupts)
        ta      0                               ! syscall (disable interrupts)

        /*
         *  While we had ISR dispatching disabled in this thread,
         *  did we miss anything?  If so, then we need to do another
         *  _Thread_Dispatch before leaving this ISR dispatch context.
         */

        sethi    %hi(SYM(_Context_Switch_necessary)), %l4
        ldub     [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5

        ! NOTE: Use some of the delay slot to start loading this
        sethi    %hi(SYM(_ISR_Signals_to_thread_executing)), %l6
        ldub     [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7

        orcc     %l5, %g0, %g0   ! Is a thread switch necessary?
        bnz      dispatchAgain   ! Yes, then invoke the dispatcher AGAIN
        ! NOTE: Use the delay slot to catch the orcc below

        /*
         *  Finally, check to see if signals were sent to the currently
         *  executing task.  If so, we need to invoke the interrupt dispatcher.
         */

        ! NOTE: Delay slots above were used to perform the load AND
        !       this orcc falls into the delay slot for the bnz above
        orcc     %l7, %g0, %g0   ! Were signals sent to the currently
                                 !   executing thread?
        bz       allow_nest_again ! No, then clear out and return
        ! NOTE: use the delay slot from the bz to load 3 into %g1

        ! Yes, then invoke the dispatcher
dispatchAgain:
        mov     3, %g1                          ! syscall (enable interrupts)
        ta      0                               ! syscall (enable interrupts)
        ba      isr_dispatch
        nop

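/*
 *  NOTE (illustration): the overall shape of _ISR_Dispatch and the
 *  dispatchAgain loop, with hypothetical irq_enable()/irq_disable()
 *  wrappers standing in for the "ta 0" syscalls (g1 = 3 / g1 = 2):
 *
 *      _CPU_ISR_Dispatch_disable = 1;
 *      for ( ;; ) {
 *        irq_enable();                // traps/interrupts back on
 *        _Thread_Dispatch();
 *        irq_disable();               // protect the CWP (see PR578)
 *        if ( !_Context_Switch_necessary &&
 *             !_ISR_Signals_to_thread_executing )
 *          break;                     // nothing was missed: leave the loop
 *      }
 *      // fall through to allow_nest_again below
 */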
allow_nest_again:

        ! Zero out the ISR stack nesting prevention flag
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
        st       %g0, [%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0

        /*
         *  Restore the task's global and out registers.
         */

        mov    %fp, %g1

                                              ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
        ldd     [%fp + ISF_G6_OFFSET], %g6    ! restore g6, g7

        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and are thus off limits.
         *
         *   The following is the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  If (CWP + 1) is an invalid window, then we need to reload it.
         *
         *  WARNING: Traps should now be disabled.
         */

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = win >> cwp + 1 ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is the task's window invalid?
        bne     good_task_window

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the task's window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! reload of sp clobbers ISF
        save                               ! Back to ISR dispatch window

good_task_window:

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop; nop; nop
                                           !   and restore condition codes
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        jmp     %l1                        ! transfer control and
        rett    %l2                        ! go back to the task's window

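/*
 *  NOTE (illustration): per the SPARC V8 definition, the jmp/rett pair
 *  above transfers control with PC = %l1 and nPC = %l2, while rett also
 *  restores the register window and re-enables traps.  Its effect on the
 *  PSR can be sketched as:
 *
 *      uint32_t rett_psr(uint32_t psr, unsigned nwindows)
 *      {
 *        unsigned cwp = ((psr & SPARC_PSR_CWP_MASK) + 1) % nwindows;
 *        psr = (psr & ~SPARC_PSR_CWP_MASK) | cwp;   // back to task window
 *        return psr | SPARC_PSR_ET_MASK;            // traps back on
 *      }
 *
 *  (rett also restores S from PS, which is not modeled here.)
 */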
/* end of file */