source: rtems/c/src/lib/libbsp/sparc/shared/irq_asm.S @ 9460333e

Last change on this file since 9460333e was 9460333e, checked in by Sebastian Huber <sebastian.huber@…>, on 06/20/16 at 08:08:39

sparc: Rework CPU counter support

Rework CPU counter support to enable use of the GR740 up-counter via
%asr22 and %asr23.

/*  cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2014-2015 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 */

#include <rtems/asm.h>
#include <rtems/score/percpu.h>
#include <bspopts.h>

#if SPARC_HAS_FPU == 1 && defined(SPARC_USE_SAFE_FP_SUPPORT)
  #define FP_FRAME_OFFSET_F0_F1 (CPU_MINIMUM_STACK_FRAME_SIZE + 0)
  #define FP_FRAME_OFFSET_F2_F3 (FP_FRAME_OFFSET_F0_F1 + 8)
  #define FP_FRAME_OFFSET_F4_F5 (FP_FRAME_OFFSET_F2_F3 + 8)
  #define FP_FRAME_OFFSET_F6_F7 (FP_FRAME_OFFSET_F4_F5 + 8)
  #define FP_FRAME_OFFSET_F8_F9 (FP_FRAME_OFFSET_F6_F7 + 8)
  #define FP_FRAME_OFFSET_F10_F11 (FP_FRAME_OFFSET_F8_F9 + 8)
  #define FP_FRAME_OFFSET_F12_F13 (FP_FRAME_OFFSET_F10_F11 + 8)
  #define FP_FRAME_OFFSET_F14_F15 (FP_FRAME_OFFSET_F12_F13 + 8)
  #define FP_FRAME_OFFSET_F16_F17 (FP_FRAME_OFFSET_F14_F15 + 8)
  #define FP_FRAME_OFFSET_F18_F19 (FP_FRAME_OFFSET_F16_F17 + 8)
  #define FP_FRAME_OFFSET_F20_F21 (FP_FRAME_OFFSET_F18_F19 + 8)
  #define FP_FRAME_OFFSET_F22_F23 (FP_FRAME_OFFSET_F20_F21 + 8)
  #define FP_FRAME_OFFSET_F24_F25 (FP_FRAME_OFFSET_F22_F23 + 8)
  #define FP_FRAME_OFFSET_F26_F27 (FP_FRAME_OFFSET_F24_F25 + 8)
  #define FP_FRAME_OFFSET_F28_F29 (FP_FRAME_OFFSET_F26_F27 + 8)
  #define FP_FRAME_OFFSET_F30_F31 (FP_FRAME_OFFSET_F28_F29 + 8)
  #define FP_FRAME_OFFSET_FSR (FP_FRAME_OFFSET_F30_F31 + 8)
  #define FP_FRAME_SIZE (FP_FRAME_OFFSET_FSR + 8)
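
  /*
   * Resulting layout (a sketch, assuming an 8-byte aligned minimum stack
   * frame): sixteen 8-byte slots for the register pairs %f0/%f1 through
   * %f30/%f31 followed by one 8-byte slot for the %fsr, so FP_FRAME_SIZE
   * is CPU_MINIMUM_STACK_FRAME_SIZE + 136 bytes.
   */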
#endif

/*
 *  void _CPU_Context_switch(
 *    Context_Control  *run,
 *    Context_Control  *heir
 *  )
 *
 *  This routine performs a normal non-FP context switch.
 */

        .align 4
        PUBLIC(_CPU_Context_switch)
SYM(_CPU_Context_switch):
        st      %g5, [%o0 + G5_OFFSET]       ! save the global registers
        st      %g7, [%o0 + G7_OFFSET]

        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
        std     %l2, [%o0 + L2_OFFSET]
        std     %l4, [%o0 + L4_OFFSET]
        std     %l6, [%o0 + L6_OFFSET]

        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
        std     %i2, [%o0 + I2_OFFSET]
        std     %i4, [%o0 + I4_OFFSET]
        std     %i6, [%o0 + I6_FP_OFFSET]

        std     %o6, [%o0 + O6_SP_OFFSET]    ! save the output registers

        ! load the ISR stack nesting prevention flag
        ld      [%g6 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE], %o4
        ! save it a bit later so we do not waste a couple of cycles

        rd      %psr, %o2
        st      %o2, [%o0 + PSR_OFFSET]      ! save status register

        ! Now actually save the ISR stack nesting prevention flag
        st       %o4, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]

        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
         */

        PUBLIC(_CPU_Context_restore_heir)
SYM(_CPU_Context_restore_heir):
        /*
         *  Flush all windows with valid contents except the current one.
         *  In examining the set of register windows, one may logically divide
         *  the windows into sets (some of which may be empty) based on their
         *  current status:
         *
         *    + current (i.e. in use),
         *    + used (i.e. a restore would not trap)
         *    + invalid (i.e. the corresponding WIM bit is 1)
         *    + unused
         *
         *  Either the used or unused set of windows may be empty.
         *
         *  NOTE: We assume only one bit is set in the WIM at a time.
         *
         *  Given a CWP of 5 and a WIM of 0x1, the registers are divided
         *  into sets as follows:
         *
         *    + 0   - invalid
         *    + 1-4 - unused
         *    + 5   - current
         *    + 6-7 - used
         *
         *  In this case, we would only save the used windows -- 6 and 7.
         *
         *  Traps are disabled for the same logical period as in a
         *  flush all windows trap handler.
         *
         *  Register usage while saving the windows:
         *    g1 = current PSR
         *    g2 = current WIM
         *    g3 = CWP
         *    g4 = WIM scratch
         *    g5 = scratch
         */

        and     %o2, SPARC_PSR_CWP_MASK, %g3  ! g3 = CWP
        andn    %o2, SPARC_PSR_ET_MASK, %g1   ! g1 = psr with traps disabled
        mov     %g1, %psr                     ! **** DISABLE TRAPS ****
        mov     %wim, %g2                     ! g2 = wim
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = WIM mask for CW invalid

save_frame_loop:
        sll     %g4, 1, %g5                   ! rotate the "wim" left 1
        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
        or      %g4, %g5, %g4                 ! g4 = wim if we do one restore
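
        ! Example (assuming SPARC_NUMBER_OF_REGISTER_WINDOWS == 8): with
        ! CWP = 5, g4 starts as 0x20 and the rotate yields 0x40, i.e. the
        ! invalid-window mask that would apply after one restore (CWP 5 -> 6).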

        /*
         *  If a restore would not underflow, then continue.
         */

        andcc   %g4, %g2, %g0                 ! Any more windows to flush?
        bnz     done_flushing                 ! No, then we are done flushing
        nop

        restore                               ! back one window

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        ba      save_frame_loop
        nop

done_flushing:

        ! Wait three instructions after the write to PSR before using
        ! non-global registers or instructions affecting the CWP
        mov     %g1, %psr                     ! restore cwp
        add     %g3, 1, %g2                   ! calculate desired WIM
        and     %g2, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g2
        mov     1, %g4
        sll     %g4, %g2, %g4                 ! g4 = new WIM
        mov     %g4, %wim
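
        ! Example (assuming 8 register windows): with g3 = CWP = 5 the new
        ! WIM is 1 << 6 = 0x40, so only the window adjacent to the current
        ! one is invalid and the heir gets the maximum of usable windows.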

#if defined(RTEMS_SMP)
        ! The executing context no longer executes on this processor
        st      %g0, [%o0 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]

        ! Try to update the is executing indicator of the heir context
        mov     1, %g1

try_update_is_executing:

        swap    [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
        cmp     %g1, 0
        bne     check_is_executing

        ! The next load is in a delay slot, which is all right
#endif

        ld      [%o1 + PSR_OFFSET], %g1       ! g1 = heir psr with traps enabled
        andn    %g1, SPARC_PSR_CWP_MASK, %g1  ! g1 = heir psr w/o cwp
        or      %g1, %g3, %g1                 ! g1 = heir psr with cwp
        mov     %g1, %psr                     ! restore status register and
                                              ! **** ENABLE TRAPS ****

        ld      [%o1 + G5_OFFSET], %g5        ! restore the global registers
        ld      [%o1 + G7_OFFSET], %g7

        ! Load thread specific ISR dispatch prevention flag
        ld      [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
        ! Store it to memory later to use the cycles

        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
        ldd     [%o1 + L2_OFFSET], %l2
        ldd     [%o1 + L4_OFFSET], %l4
        ldd     [%o1 + L6_OFFSET], %l6

        ! Now restore thread specific ISR dispatch prevention flag
        st      %o2, [%g6 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        ldd     [%o1 + I0_OFFSET], %i0        ! restore the input registers
        ldd     [%o1 + I2_OFFSET], %i2
        ldd     [%o1 + I4_OFFSET], %i4
        ldd     [%o1 + I6_FP_OFFSET], %i6

        ldd     [%o1 + O6_SP_OFFSET], %o6     ! restore the output registers

        jmp     %o7 + 8                       ! return
        nop                                   ! delay slot

#if defined(RTEMS_SMP)
check_is_executing:

        ! Check the is executing indicator of the heir context
        ld      [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
        cmp     %g1, 0
        beq     try_update_is_executing
         mov    1, %g1

        ! We may have a new heir

        ! Read the executing and heir
        ld      [%g6 + PER_CPU_OFFSET_EXECUTING], %g2
        ld      [%g6 + PER_CPU_OFFSET_HEIR], %g4

        ! Update the executing only if necessary to avoid cache line
        ! monopolization.
        cmp     %g2, %g4
        beq     try_update_is_executing
         mov    1, %g1

        ! Calculate the heir context pointer
        sub     %o1, %g2, %g2
        add     %g2, %g4, %o1
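
        ! The context control sits at the same offset in every TCB, so the
        ! heir context is heir TCB + (context address - executing TCB).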

        ! Update the executing
        st      %g4, [%g6 + PER_CPU_OFFSET_EXECUTING]

        ba      try_update_is_executing
         mov    1, %g1
#endif

/*
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  )
 *
 *  This routine is generally used only to perform restart self.
 *
 *  NOTE: It is unnecessary to reload some registers.
 */
        .align 4
        PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_restore):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
        rd      %psr, %o2
#if defined(RTEMS_SMP)
        ! On SMP, the restore path also needs a valid executing context to
        ! update the is executing indicator.
        mov     %i0, %o0
#endif
        ba      SYM(_CPU_Context_restore_heir)
        mov     %i0, %o1                      ! in the delay slot

/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, the trap type is between 0 and
 *        255 if it is an asynchronous trap and between 256 and 511 if it is
 *        synchronous.
 */

        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                      ! Is this a synchronous trap?
        be,a    win_ovflow            ! No, then skip the adjustment
        nop                           ! DELAY
        mov     %l1, %l6              ! save trapped pc for debug info
        mov     %l2, %l1              ! do not return to the instruction
        add     %l2, 4, %l2           ! indicated

win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                 ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5            ! g5 = wim >> cwp ; shift count and CWP
                                         !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                   ! Is this an invalid window?
        bne     dont_do_the_window       ! No, then skip all this stuff
        ! we are using the delay slot

        /*
         *  The following is the same as a 1 position right rotate of WIM.
         */

        srl     %g4, 1, %g5              ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
                                         ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
                                         !      (WIM << (Number Windows - 1))
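
        ! Example (assuming 8 register windows): WIM = 0x01 rotates right to
        ! 0x80, i.e. the invalid window moves from window 0 to window 7.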

        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved from %l4 and %l5
         *  directly into the ISF below.
         */

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                               ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        st      %g7, [%sp + ISF_G7_OFFSET]     ! save g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc

        mov     %sp, %o1                       ! 2nd arg to ISR Handler

        /*
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l6 and l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */

        ld       [%g6 + PER_CPU_ISR_NEST_LEVEL], %l7
        ld       [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL], %l6

        add      %l7, 1, %l7
        st       %l7, [%g6 + PER_CPU_ISR_NEST_LEVEL]

        add      %l6, 1, %l6
        st       %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

#if SPARC_HAS_FPU == 1
        /*
         * We cannot use an intermediate value for operations with the PSR[EF]
         * bit since they use a 13-bit sign extension and PSR[EF] is bit 12.
         */
        sethi    %hi(SPARC_PSR_EF_MASK), %l5
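        /*
         * Concretely: SPARC_PSR_EF_MASK is 0x1000 (bit 12), while a signed
         * 13-bit immediate only covers -4096..4095, so the mask must live
         * in a register.
         */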
#endif

        /*
         *  If the ISR nest level was zero (now 1), then switch stacks.
         */

        mov      %sp, %fp
        subcc    %l7, 1, %l7             ! outermost interrupt handler?
        bnz      dont_switch_stacks      ! No, then do not switch stacks

#if defined(RTEMS_PROFILING)
         sethi   %hi(_SPARC_Counter), %o5
        ld       [%o5 + %lo(_SPARC_Counter)], %l4
        call     %l4, 0
         nop
        mov      %o0, %o5
#else
         nop
#endif

        ld       [%g6 + PER_CPU_INTERRUPT_STACK_HIGH], %sp

#if SPARC_HAS_FPU == 1
        /*
         * Test if the interrupted thread uses the floating point unit
         * (PSR[EF] == 1).  In case it uses the floating point unit, then store
         * the floating point status register.  This has the side-effect that
         * all pending floating point operations complete before the store
         * completes.  The PSR[EF] bit is restored after the call to the
         * interrupt handler.  Thus post-switch actions (e.g. signal handlers)
         * and context switch extensions may still corrupt the floating point
         * context.
         */
        andcc    %l0, %l5, %g0
        bne,a    dont_switch_stacks
         st      %fsr, [%g6 + SPARC_PER_CPU_FSR_OFFSET]
#endif

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f).  If so,
         *  set the PIL in the %psr to mask off interrupts with lower priority.
         *  The original %psr in %l0 is not modified since it will be restored
         *  when the interrupt handler returns.
         */

        mov      %l0, %g5
        and      %l3, 0x0ff, %g4
        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        ba       pil_fixed
        or       %g4, %g5, %g5
dont_fix_pil:
        or       %g5, SPARC_PSR_PIL_MASK, %g5
pil_fixed:
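
        ! Example: external interrupt level 5 arrives as trap type 0x15;
        ! 0x15 << 8 = 0x1500 masked with SPARC_PSR_PIL_MASK (0xf00) gives
        ! 0x500, i.e. PIL = 5 in the PSR copy held in %g5.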

#if SPARC_HAS_FPU == 1
        /*
         * Clear the PSR[EF] bit of the interrupted context to ensure that
         * interrupt service routines cannot corrupt the floating point
         * context.
         */
        andn     %g5, %l5, %g5
#endif

        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
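        ! Note that wr performs an XOR of its operands: the ET bit is 0 in
        ! %g5, so writing %g5 xor SPARC_PSR_ET_MASK sets ET = 1.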

        /*
         *  Vector to the user's handler.
         *
         *  NOTE: TBR may no longer have the vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        or       %g4, %lo(SYM(_ISR_Vector_table)), %g4
        and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
        sll      %g5, 2, %g5            ! g5 = offset into table
        ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]


                                        ! o1 = 2nd arg = address of the ISF
                                        !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0               ! o0 = 1st arg = vector number
        call     %g4, 0
#if defined(RTEMS_PROFILING)
         mov     %o5, %l3               ! save interrupt entry instant
        cmp      %l7, 0
        bne      profiling_not_outer_most_exit
         nop
        ta       SPARC_SWTRAP_IRQDIS    ! Call interrupt disable trap handler
        call     %l4, 0                 ! Call _SPARC_Counter.counter_read
         nop
        mov      %o0, %o2               ! o2 = 3rd arg = interrupt exit instant
        mov      %l3, %o1               ! o1 = 2nd arg = interrupt entry instant
        call     SYM(_Profiling_Outer_most_interrupt_entry_and_exit), 0
         mov     %g6, %o0               ! o0 = 1st arg = per-CPU control
profiling_not_outer_most_exit:
#else
         nop                            ! delay slot
#endif

        /*
         *  Re-disable traps so we can finish up the interrupt processing.
         *  This is a VERY conservative place to do this.
         *
         *  NOTE: %l0 has the PSR which was in place when we took the trap.
         */

        mov      %l0, %psr             ! **** DISABLE TRAPS ****
        nop; nop; nop

        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         */

        st       %l7, [%g6 + PER_CPU_ISR_NEST_LEVEL]

        sub      %l6, 1, %l6
        st       %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

        /*
         *  If dispatching is disabled (includes nested interrupt case),
         *  then do a "simple" exit.
         */

        orcc     %l6, %g0, %g0   ! Is dispatching disabled?
        bnz      simple_return   ! Yes, then do a "simple" exit
        nop

        ! Are we dispatching from a previous ISR in the interrupted thread?
        ld       [%g6 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE], %l7
        orcc     %l7, %g0, %g0   ! Is this thread already doing an ISR?
        bnz      simple_return   ! Yes, then do a "simple" exit
        nop


        /*
         *  If a context switch is necessary, then fudge the stack to
         *  return to the interrupt dispatcher.
         */

        ldub     [%g6 + PER_CPU_DISPATCH_NEEDED], %l6

        orcc     %l6, %g0, %g0   ! Is a thread switch necessary?
        bz       simple_return   ! No, then do a simple return
        nop

        /*
         *  Invoke the interrupt dispatcher.
         */

        ! Set the ISR dispatch nesting prevention flag
        mov      1, %l6
        st       %l6, [%g6 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        /*
         *  The following subtract should get us back on the interrupted
         *  task's stack and add enough room to invoke the dispatcher.
         *  Once we enable traps, we are mostly back in the context
         *  of the task and subsequent interrupts can operate normally.
         */

        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
        mov     %l7, %psr                      !  **** ENABLE TRAPS ****
        nop
        nop
        nop
isr_dispatch:

#if SPARC_HAS_FPU == 1 && defined(SPARC_USE_SAFE_FP_SUPPORT)
        /* Test if we interrupted a floating point thread (PSR[EF] == 1) */
        andcc   %l0, %l5, %g0
        be      non_fp_thread_dispatch
         nop

        /*
         * Yes, this is a floating point thread.  Save the floating point
         * context to a new stack frame and do the thread dispatch.
         * Post-switch actions (e.g. signal handlers) and context switch
         * extensions may safely use the floating point unit.
         */
        sub     %sp, FP_FRAME_SIZE, %sp
        std     %f0, [%sp + FP_FRAME_OFFSET_F0_F1]
        std     %f2, [%sp + FP_FRAME_OFFSET_F2_F3]
        std     %f4, [%sp + FP_FRAME_OFFSET_F4_F5]
        std     %f6, [%sp + FP_FRAME_OFFSET_F6_F7]
        std     %f8, [%sp + FP_FRAME_OFFSET_F8_F9]
        std     %f10, [%sp + FP_FRAME_OFFSET_F10_F11]
        std     %f12, [%sp + FP_FRAME_OFFSET_F12_F13]
        std     %f14, [%sp + FP_FRAME_OFFSET_F14_F15]
        std     %f16, [%sp + FP_FRAME_OFFSET_F16_F17]
        std     %f18, [%sp + FP_FRAME_OFFSET_F18_F19]
        std     %f20, [%sp + FP_FRAME_OFFSET_F20_F21]
        std     %f22, [%sp + FP_FRAME_OFFSET_F22_F23]
        std     %f24, [%sp + FP_FRAME_OFFSET_F24_F25]
        std     %f26, [%sp + FP_FRAME_OFFSET_F26_F27]
        std     %f28, [%sp + FP_FRAME_OFFSET_F28_F29]
        std     %f30, [%sp + FP_FRAME_OFFSET_F30_F31]
        call    SYM(_Thread_Dispatch), 0
         st     %fsr, [%sp + FP_FRAME_OFFSET_FSR]

        /*
         * Restore the floating point context from the stack frame and release
         * the stack frame.
         */
        ldd     [%sp + FP_FRAME_OFFSET_F0_F1], %f0
        ldd     [%sp + FP_FRAME_OFFSET_F2_F3], %f2
        ldd     [%sp + FP_FRAME_OFFSET_F4_F5], %f4
        ldd     [%sp + FP_FRAME_OFFSET_F6_F7], %f6
        ldd     [%sp + FP_FRAME_OFFSET_F8_F9], %f8
        ldd     [%sp + FP_FRAME_OFFSET_F10_F11], %f10
        ldd     [%sp + FP_FRAME_OFFSET_F12_F13], %f12
        ldd     [%sp + FP_FRAME_OFFSET_F14_F15], %f14
        ldd     [%sp + FP_FRAME_OFFSET_F16_F17], %f16
        ldd     [%sp + FP_FRAME_OFFSET_F18_F19], %f18
        ldd     [%sp + FP_FRAME_OFFSET_F20_F21], %f20
        ldd     [%sp + FP_FRAME_OFFSET_F22_F23], %f22
        ldd     [%sp + FP_FRAME_OFFSET_F24_F25], %f24
        ldd     [%sp + FP_FRAME_OFFSET_F26_F27], %f26
        ldd     [%sp + FP_FRAME_OFFSET_F28_F29], %f28
        ldd     [%sp + FP_FRAME_OFFSET_F30_F31], %f30
        ld      [%sp + FP_FRAME_OFFSET_FSR], %fsr
        ba      thread_dispatch_done
         add    %sp, FP_FRAME_SIZE, %sp

non_fp_thread_dispatch:
#endif

        call    SYM(_Thread_Dispatch), 0
         nop

#if SPARC_HAS_FPU == 1 && defined(SPARC_USE_SAFE_FP_SUPPORT)
thread_dispatch_done:
#endif

        /*
         *  We invoked _Thread_Dispatch in a state similar to the interrupted
         *  task.  In order to safely be able to tinker with the register
         *  windows and get the task back to its pre-interrupt state,
         *  interrupts must be disabled.  In particular, the CWP in the PSR
         *  is fragile during this period.  (See PR578.)
         */
        ta      SPARC_SWTRAP_IRQDIS  ! syscall (disable interrupts)

        /*
         *  While we had ISR dispatching disabled in this thread,
         *  did we miss anything?  If so, then we need to do another
         *  _Thread_Dispatch before leaving this ISR dispatch context.
         */
        ldub    [%g6 + PER_CPU_DISPATCH_NEEDED], %l7

        orcc    %l7, %g0, %g0        ! Is a thread switch necessary?
        bne,a   isr_dispatch         ! Yes, then invoke the dispatcher.
                                     ! g1 = old PSR PIL returned from IRQDis
        ta      SPARC_SWTRAP_IRQEN   ! syscall (enable interrupts to same level)
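
        ! The annul bit on the bne,a above means that the IRQEN trap in its
        ! delay slot executes only when we branch back to isr_dispatch; on
        ! the fall-through path interrupts stay disabled.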

        ! No, then clear out and return

        ! Zero out the ISR stack nesting prevention flag
        st       %g0, [%g6 + SPARC_PER_CPU_ISR_DISPATCH_DISABLE]

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0

        /*
         *  Restore the task's global and out registers.
         */

        mov    %fp, %g1
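        ! %g1 keeps the ISF address alive across the window restore below,
        ! where the reload of %sp clobbers the ISF.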

                                              ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
        ld      [%fp + ISF_G7_OFFSET], %g7    ! restore g7

        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and are thus off limits.
         *
         *   The following is the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  If (CWP + 1) is an invalid window, then we need to reload it.
         *
         *  WARNING: Traps should now be disabled
         */

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = wim >> (cwp + 1) ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is the task's window invalid?
        bne     good_task_window

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))
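
        ! Example (assuming 8 register windows): WIM = 0x80 rotates left to
        ! 0x01, i.e. the invalid window moves from window 7 to window 0.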

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the task's window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! reload of sp clobbers ISF
        save                               ! Back to ISR dispatch window

good_task_window:

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop; nop; nop
                                           !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        jmp     %l1                        ! transfer control and
        rett    %l2                        ! go back to the task's window
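
        ! jmp/rett is the SPARC return-from-trap pair: it sets PC = %l1 and
        ! nPC = %l2, and rett additionally increments the CWP and sets
        ! PSR[ET] = 1.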

/* end of file */