source: rtems/cpukit/score/cpu/sparc/cpu_asm.S @ febaa8a

Last change on this file was febaa8a, checked in by Joel Sherrill <joel.sherrill@…> on Mar 27, 2010 at 3:03:09 PM

2010-03-27 Joel Sherrill <joel.sherrill@…>

  • cpu.c, cpu_asm.S: Add include of config.h
/*  cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2007.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 *
 *  $Id$
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

#if (SPARC_HAS_FPU == 1)

/*
 *  void _CPU_Context_save_fp(
 *    void **fp_context_ptr
 *  )
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the location to save the FP context
 *  to is changed then the pointer is modified by this routine.
 *
 *  NOTE: See the README in this directory for information on the
 *        management of the "EF" bit in the PSR.
 */

        .align 4
        PUBLIC(_CPU_Context_save_fp)
SYM(_CPU_Context_save_fp):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  The following enables the floating point unit.
         */

        mov     %psr, %l0
        sethi   %hi(SPARC_PSR_EF_MASK), %l1
        or      %l1, %lo(SPARC_PSR_EF_MASK), %l1
        or      %l0, %l1, %l0
        mov     %l0, %psr                  ! **** ENABLE FLOAT ACCESS ****
        nop; nop; nop;                     ! Need three nops before EF is
        ld      [%i0], %l0                 ! active due to pipeline delay!!!
        std     %f0, [%l0 + FO_F1_OFFSET]
        std     %f2, [%l0 + F2_F3_OFFSET]
        std     %f4, [%l0 + F4_F5_OFFSET]
        std     %f6, [%l0 + F6_F7_OFFSET]
        std     %f8, [%l0 + F8_F9_OFFSET]
        std     %f10, [%l0 + F1O_F11_OFFSET]
        std     %f12, [%l0 + F12_F13_OFFSET]
        std     %f14, [%l0 + F14_F15_OFFSET]
        std     %f16, [%l0 + F16_F17_OFFSET]
        std     %f18, [%l0 + F18_F19_OFFSET]
        std     %f20, [%l0 + F2O_F21_OFFSET]
        std     %f22, [%l0 + F22_F23_OFFSET]
        std     %f24, [%l0 + F24_F25_OFFSET]
        std     %f26, [%l0 + F26_F27_OFFSET]
        std     %f28, [%l0 + F28_F29_OFFSET]
        std     %f30, [%l0 + F3O_F31_OFFSET]
        st      %fsr, [%l0 + FSR_OFFSET]
        ret
        restore

/*
 *  void _CPU_Context_restore_fp(
 *    void **fp_context_ptr
 *  )
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the location to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  NOTE: See the README in this directory for information on the
 *        management of the "EF" bit in the PSR.
 */

        .align 4
        PUBLIC(_CPU_Context_restore_fp)
SYM(_CPU_Context_restore_fp):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  The following enables the floating point unit.
         */

        mov     %psr, %l0
        sethi   %hi(SPARC_PSR_EF_MASK), %l1
        or      %l1, %lo(SPARC_PSR_EF_MASK), %l1
        or      %l0, %l1, %l0
        mov     %l0, %psr                  ! **** ENABLE FLOAT ACCESS ****
        nop; nop; nop;                     ! Need three nops before EF is
        ld      [%i0], %l0                 ! active due to pipeline delay!!!
        ldd     [%l0 + FO_F1_OFFSET], %f0
        ldd     [%l0 + F2_F3_OFFSET], %f2
        ldd     [%l0 + F4_F5_OFFSET], %f4
        ldd     [%l0 + F6_F7_OFFSET], %f6
        ldd     [%l0 + F8_F9_OFFSET], %f8
        ldd     [%l0 + F1O_F11_OFFSET], %f10
        ldd     [%l0 + F12_F13_OFFSET], %f12
        ldd     [%l0 + F14_F15_OFFSET], %f14
        ldd     [%l0 + F16_F17_OFFSET], %f16
        ldd     [%l0 + F18_F19_OFFSET], %f18
        ldd     [%l0 + F2O_F21_OFFSET], %f20
        ldd     [%l0 + F22_F23_OFFSET], %f22
        ldd     [%l0 + F24_F25_OFFSET], %f24
        ldd     [%l0 + F26_F27_OFFSET], %f26
        ldd     [%l0 + F28_F29_OFFSET], %f28
        ldd     [%l0 + F3O_F31_OFFSET], %f30
        ld      [%l0 + FSR_OFFSET], %fsr
        ret
        restore

#endif /* SPARC_HAS_FPU */

/*
 *  void _CPU_Context_switch(
 *    Context_Control  *run,
 *    Context_Control  *heir
 *  )
 *
 *  This routine performs a normal non-FP context switch.
 */

        .align 4
        PUBLIC(_CPU_Context_switch)
SYM(_CPU_Context_switch):
        ! skip g0
        st      %g1, [%o0 + G1_OFFSET]       ! save the global registers
        std     %g2, [%o0 + G2_OFFSET]
        std     %g4, [%o0 + G4_OFFSET]
        std     %g6, [%o0 + G6_OFFSET]

        ! load the address of the ISR stack nesting prevention flag
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %g2
        ld       [%g2 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %g2
        ! save it a bit later so we do not waste a couple of cycles

        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
        std     %l2, [%o0 + L2_OFFSET]
        std     %l4, [%o0 + L4_OFFSET]
        std     %l6, [%o0 + L6_OFFSET]

        ! Now actually save ISR stack nesting prevention flag
        st       %g2, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]

        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
        std     %i2, [%o0 + I2_OFFSET]
        std     %i4, [%o0 + I4_OFFSET]
        std     %i6, [%o0 + I6_FP_OFFSET]

        std     %o0, [%o0 + O0_OFFSET]       ! save the output registers
        std     %o2, [%o0 + O2_OFFSET]
        std     %o4, [%o0 + O4_OFFSET]
        std     %o6, [%o0 + O6_SP_OFFSET]

        rd      %psr, %o2
        st      %o2, [%o0 + PSR_OFFSET]      ! save status register

        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
         */

        PUBLIC(_CPU_Context_restore_heir)
SYM(_CPU_Context_restore_heir):
        /*
         *  Flush all windows with valid contents except the current one.
         *  In examining the set of register windows, one may logically divide
         *  the windows into sets (some of which may be empty) based on their
         *  current status:
         *
         *    + current (i.e. in use),
         *    + used (i.e. a restore would not trap)
         *    + invalid (i.e. a 1 in the corresponding bit of the WIM)
         *    + unused
         *
         *  Either the used or unused set of windows may be empty.
         *
         *  NOTE: We assume only one bit is set in the WIM at a time.
         *
         *  Given a CWP of 5 and a WIM of 0x1, the windows are divided
         *  into sets as follows:
         *
         *    + 0   - invalid
         *    + 1-4 - unused
         *    + 5   - current
         *    + 6-7 - used
         *
         *  In this case, we would only save the used windows -- 6 and 7.
         *
         *  Traps are disabled for the same logical period as in a
         *  flush all windows trap handler.
         *
         *  Register usage while saving the windows:
         *    g1 = current PSR
         *    g2 = current wim
         *    g3 = CWP
         *    g4 = wim scratch
         *    g5 = scratch
         */
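
        /*
         *  A worked example of the flush loop below (a sketch, assuming
         *  the 8 register windows of the ERC32):  with CWP = 5 and
         *  WIM = 0x01, g4 starts as 1 << 5 = 0x20.  Each pass rotates g4
         *  up one window (0x40, then 0x80, then wrapping around to bit 0),
         *  and the loop exits when (g4 & WIM) != 0 -- i.e. after windows
         *  6 and 7, the "used" set from the example above, have been
         *  flushed to their stack frames.
         */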

        ld      [%o1 + PSR_OFFSET], %g1       ! g1 = saved psr

        and     %o2, SPARC_PSR_CWP_MASK, %g3  ! g3 = CWP
                                              ! g1 = psr w/o cwp
        andn    %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1
        or      %g1, %g3, %g1                 ! g1 = heir's psr
        mov     %g1, %psr                     ! restore status register and
                                              ! **** DISABLE TRAPS ****
        mov     %wim, %g2                     ! g2 = wim
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = WIM mask for CW invalid

save_frame_loop:
        sll     %g4, 1, %g5                   ! rotate the "wim" left 1
        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
        or      %g4, %g5, %g4                 ! g4 = wim if we do one restore

        /*
         *  If a restore would not underflow, then continue.
         */

        andcc   %g4, %g2, %g0                 ! Did we reach the invalid window?
        bnz     done_flushing                 ! Yes, then stop flushing
        nop

        restore                               ! back one window

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        ba      save_frame_loop
        nop

done_flushing:

        add     %g3, 1, %g3                   ! calculate desired WIM
        and     %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = new WIM
        mov     %g4, %wim
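
        /*
         *  Example (a sketch, assuming 8 windows):  continuing with
         *  CWP = 5, the desired WIM computed above is
         *  1 << ((5 + 1) mod 8) = 0x40, i.e. the window just "above"
         *  the current one becomes the invalid window.
         */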

        or      %g1, SPARC_PSR_ET_MASK, %g1
        mov     %g1, %psr                     ! **** ENABLE TRAPS ****
                                              !   and restore CWP
        nop
        nop
        nop

        ! skip g0
        ld      [%o1 + G1_OFFSET], %g1        ! restore the global registers
        ldd     [%o1 + G2_OFFSET], %g2
        ldd     [%o1 + G4_OFFSET], %g4
        ldd     [%o1 + G6_OFFSET], %g6

        ! Load thread specific ISR dispatch prevention flag
        ld      [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %o3
        ! Store it to memory later to use the cycles

        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
        ldd     [%o1 + L2_OFFSET], %l2
        ldd     [%o1 + L4_OFFSET], %l4
        ldd     [%o1 + L6_OFFSET], %l6

        ! Now restore thread specific ISR dispatch prevention flag
        st       %o2, [%o3 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        ldd     [%o1 + I0_OFFSET], %i0        ! restore the input registers
        ldd     [%o1 + I2_OFFSET], %i2
        ldd     [%o1 + I4_OFFSET], %i4
        ldd     [%o1 + I6_FP_OFFSET], %i6

        ldd     [%o1 + O2_OFFSET], %o2        ! restore the output registers
        ldd     [%o1 + O4_OFFSET], %o4
        ldd     [%o1 + O6_SP_OFFSET], %o6
        ! do o0/o1 last to avoid destroying heir context pointer
        ldd     [%o1 + O0_OFFSET], %o0        ! overwrite heir pointer

        jmp     %o7 + 8                       ! return
        nop                                   ! delay slot
/*
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  )
 *
 *  This routine is generally used only to perform restart self.
 *
 *  NOTE: It is unnecessary to reload some registers.
 */

        .align 4
        PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_restore):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
        rd      %psr, %o2
        ba      SYM(_CPU_Context_restore_heir)
        mov     %i0, %o1                      ! in the delay slot

/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, the trap type is between 0 and
 *        255 if it is an asynchronous trap and between 256 and 511 if it
 *        is synchronous.
 */
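
/*
 *  A concrete illustration of this convention (an inference from the
 *  256..511 rule above, not normative):  an external interrupt at level 5
 *  enters with l3 = 0x15, while a synchronous trap of type 0x02 enters
 *  with l3 = 0x102 -- the 0x100 bit is the synchronous indicator tested
 *  below via SPARC_SYNCHRONOUS_TRAP_BIT_MASK.
 */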

        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                      ! Is this a synchronous trap?
        be,a    win_ovflow            ! No, then skip the adjustment
        nop                           ! DELAY
        mov     %l1, %l6              ! save trapped pc for debug info
        mov     %l2, %l1              ! do not return to the instruction
        add     %l2, 4, %l2           ! indicated

win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                 ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5            ! g5 = wim >> cwp ; shift count and CWP
                                         !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                   ! Is this an invalid window?
        bne     dont_do_the_window       ! No, then skip all this stuff
        ! we are using the delay slot

        /*
         *  The following is the same as a 1 position right rotate of WIM
         */

        srl     %g4, 1, %g5              ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
                                         ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
                                         !      (WIM << (Number Windows - 1))
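
        /*
         *  Worked example (assuming 8 windows):  at an overflow with
         *  CWP = 0, WIM = 0x01, so the rotate gives
         *  g4 = (0x01 >> 1) | (0x01 << 7) = 0x80 -- the invalid window
         *  mark moves one position to the right.
         */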

        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved directly from %l4 and
         *  %l5 into the ISF below.
         */

save_isf:

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                               ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        std     %g6, [%sp + ISF_G6_OFFSET]     ! save g6, g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc

        mov     %sp, %o1                       ! 2nd arg to ISR Handler

        /*
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */
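
        /*
         *  In C terms, the next several instructions amount to simply
         *  (a sketch):
         *
         *    _Thread_Dispatch_disable_level += 1;
         *    _ISR_Nest_level += 1;
         */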

        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
        sethi    %hi(SYM(_ISR_Nest_level)), %l5
        ld       [%l5 + %lo(SYM(_ISR_Nest_level))], %l7

        add      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        add      %l7, 1, %l7
        st       %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]

        /*
         *  If ISR nest level was zero (now 1), then switch stack.
         */

        mov      %sp, %fp
        subcc    %l7, 1, %l7             ! outermost interrupt handler?
        bnz      dont_switch_stacks      ! No, then do not switch stacks

        sethi    %hi(SYM(_CPU_Interrupt_stack_high)), %g4
        ld       [%g4 + %lo(SYM(_CPU_Interrupt_stack_high))], %sp

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
         *  set the PIL in the %psr to mask off interrupts with lower priority.
         *  The original %psr in %l0 is not modified since it will be restored
         *  when the interrupt handler returns.
         */

        mov      %l0, %g5
        and      %l3, 0x0ff, %g4
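
        /*
         *  Worked example for the PIL fix below (assuming the PIL occupies
         *  PSR bits 8..11, i.e. SPARC_PSR_PIL_MASK = 0xf00):  an external
         *  interrupt at level 9 is trap type 0x19, so g4 = 0x19 and
         *  (0x19 << 8) & 0xf00 = 0x900 -- exactly PIL = 9.
         */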

/* This is a fix for ERC32 with FPU rev.B or rev.C */

#if defined(FPU_REVB)


        subcc    %g4, 0x08, %g0
        be       fpu_revb
        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        or       %g4, %g5, %g5
        srl      %l0, 12, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        nop
        ba,a     enable_irq


fpu_revb:
        srl      %l0, 12, %g4   ! check if EF is set in %psr
        andcc    %g4, 1, %g0
        be       dont_fix_pil   ! if FPU disabled then continue as normal
        and      %l3, 0xff, %g4
        subcc    %g4, 0x08, %g0
        bne      enable_irq     ! if not an FPU exception then do two fmovs
        set      __sparc_fq, %g4
        st       %fsr, [%g4]    ! if FQ is not empty and FQ[1] = fmovs
        ld       [%g4], %g4     ! then this is bug 3.14
        srl      %g4, 13, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        set      __sparc_fq, %g4
        std      %fq, [%g4]
        ld       [%g4+4], %g4
        set      0x81a00020, %g5
        subcc    %g4, %g5, %g0
        bne,a    dont_fix_pil2
        wr       %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        ba,a     simple_return

enable_irq:
        or       %g5, SPARC_PSR_PIL_MASK, %g4
        wr       %g4, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        nop; nop; nop
        fmovs    %f0, %f0
        ba       dont_fix_pil
        fmovs    %f0, %f0

        .data
        .global __sparc_fq
        .align 8
__sparc_fq:
        .word 0,0

        .text
/* end of ERC32 FPU rev.B/C fix */

#else

        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        ba       pil_fixed
        or       %g4, %g5, %g5
#endif

dont_fix_pil:
        or       %g5, SPARC_PSR_PIL_MASK, %g5
pil_fixed:
        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
dont_fix_pil2:

        /*
         *  Vector to the user's handler.
         *
         *  NOTE: TBR may no longer have the vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        ld       [%g4+%lo(SYM(_ISR_Vector_table))], %g4
        and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
        sll      %g5, 2, %g5            ! g5 = offset into table
        ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]
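                                        ! (e.g. vector 0x15 lands at byte
                                        !  offset 0x15 << 2 = 0x54, since
                                        !  each entry is a 4-byte pointer)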


                                        ! o1 = 2nd arg = address of the ISF
                                        !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0               ! o0 = 1st arg = vector number
        call     %g4, 0
        nop                             ! delay slot

        /*
         *  Redisable traps so we can finish up the interrupt processing.
         *  This is a VERY conservative place to do this.
         *
         *  NOTE: %l0 has the PSR which was in place when we took the trap.
         */

        mov      %l0, %psr             ! **** DISABLE TRAPS ****
        nop; nop; nop

        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         */

        sub      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        st       %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]
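        ! NOTE: l7 still holds the pre-increment nest level here -- the
        !       subcc in the stack switch test above already undid the
        !       increment -- so storing it unnests the ISR level.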

        /*
         *  If dispatching is disabled (includes nested interrupt case),
         *  then do a "simple" exit.
         */

        orcc     %l6, %g0, %g0   ! Is dispatching disabled?
        bnz      simple_return   ! Yes, then do a "simple" exit
        ! NOTE: Use the delay slot
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l6

        ! Are we dispatching from a previous ISR in the interrupted thread?
        ld       [%l6 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %l7
        orcc     %l7, %g0, %g0   ! Is this thread already doing an ISR?
        bnz      simple_return   ! Yes, then do a "simple" exit
        ! NOTE: Use the delay slot
        sethi    %hi(SYM(_Context_Switch_necessary)), %l4


        /*
         *  If a context switch is necessary, then fudge the stack to
         *  return to the interrupt dispatcher.
         */

        ldub     [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5

        orcc     %l5, %g0, %g0   ! Is a thread switch necessary?
        bnz      SYM(_ISR_Dispatch) ! Yes, then invoke the dispatcher
        ! NOTE: Use the delay slot
        sethi    %hi(SYM(_ISR_Signals_to_thread_executing)), %l6

        /*
         *  Finally, check to see if signals were sent to the currently
         *  executing task.  If so, we need to invoke the interrupt dispatcher.
         */

        ldub     [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7

        orcc     %l7, %g0, %g0   ! Were signals sent to the currently
                                 !   executing thread?
        bz       simple_return   ! No, then do a "simple" exit
                                 ! NOTE: Use the delay slot to clear the
                                 !   signals-to-executing-task flag
        st       %g0, [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))]


        /*
         *  Invoke interrupt dispatcher.
         */

        PUBLIC(_ISR_Dispatch)
SYM(_ISR_Dispatch):
        ! Set ISR dispatch nesting prevention flag
        mov      1, %l6
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
        st       %l6, [%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        /*
         *  The following subtract should get us back on the interrupted
         *  task's stack and add enough room to invoke the dispatcher.
         *  When we enable traps, we are mostly back in the context
         *  of the task and subsequent interrupts can operate normally.
         */

        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
        mov     %l7, %psr                      !  **** ENABLE TRAPS ****
        nop
        nop
        nop
isr_dispatch:
        call    SYM(_Thread_Dispatch), 0
        nop

        /*
         *  We invoked _Thread_Dispatch in a state similar to the interrupted
         *  task.  In order to safely be able to tinker with the register
         *  windows and get the task back to its pre-interrupt state,
         *  we need interrupts disabled.  In particular, the CWP in the PSR
         *  is fragile during this period.  (See PR578.)
         */
        mov     2,%g1                           ! syscall (disable interrupts)
        ta      0                               ! syscall (disable interrupts)

        /*
         *  While we had ISR dispatching disabled in this thread,
         *  did we miss anything?  If so, then we need to do another
         *  _Thread_Dispatch before leaving this ISR Dispatch context.
         */

        sethi    %hi(SYM(_Context_Switch_necessary)), %l4
        ldub     [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5

        ! NOTE: Use some of delay slot to start loading this
        sethi    %hi(SYM(_ISR_Signals_to_thread_executing)), %l6
        ldub     [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7

        orcc     %l5, %g0, %g0   ! Is thread switch necessary?
        bnz      dispatchAgain   ! yes, then invoke the dispatcher AGAIN
        ! NOTE: Use the delay slot to catch the orcc below

        /*
         *  Finally, check to see if signals were sent to the currently
         *  executing task.  If so, we need to invoke the interrupt dispatcher.
         */

        ! NOTE: Delay slots above were used to perform the load AND
        !       this orcc falls into the delay slot for bnz above
        orcc     %l7, %g0, %g0   ! Were signals sent to the currently
                                 !   executing thread?
        bz       allow_nest_again ! No, then clear out and return
        ! NOTE: use the delay slot from the bz to load 3 into %g1

        ! Yes, then invoke the dispatcher
dispatchAgain:
        mov     3,%g1                           ! syscall (enable interrupts)
        ta      0                               ! syscall (enable interrupts)
        ba      isr_dispatch
        nop

allow_nest_again:

        ! Zero out ISR stack nesting prevention flag
        sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
        st       %g0, [%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0

        /*
         *  Restore the task's global and out registers
         */

        mov    %fp, %g1

                                              ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
        ldd     [%fp + ISF_G6_OFFSET], %g6    ! restore g6, g7

        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and are thus off limits.
         *
         *   The following is the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  if (CWP + 1) is an invalid window then we need to reload it.
         *
         *  WARNING: Traps should now be disabled
         */
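
        /*
         *  Example:  if the task's CWP is 2 and WIM = 0x08, then below
         *  l6 = 3 and l5 = WIM >> 3 = 1, so window 3 is invalid and the
         *  task's window must be reloaded from its stack frame.
         */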

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = wim >> (cwp + 1) ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is the task's window invalid?
        bne     good_task_window

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))
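
        /*
         *  Example (assuming 8 windows):  WIM = 0x40 becomes
         *  (0x40 << 1) | (0x40 >> 7) = 0x80 -- the invalid window mark
         *  rotates one position to the left, making the task's window
         *  valid again.
         */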

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the task's window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! reload of sp clobbers ISF
        save                               ! Back to ISR dispatch window

good_task_window:

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop; nop; nop
                                           !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        jmp     %l1                        ! transfer control and
        rett    %l2                        ! go back to task's window

/* end of file */