source: rtems/c/src/lib/libbsp/sparc/shared/irq_asm.S @ 2f8704b6

Last change on this file since 2f8704b6 was 2f8704b6, checked in by Daniel Cederman <cederman@…>, on 07/13/17 at 07:26:50

sparc: Add assembly workaround for LEON3FT B2BST errata

This patch adds NOP instructions to prevent instruction sequences
that are sensitive to the LEON3FT B2BST errata. See GRLIB-TN-0009:
"LEON3FT Stale Cache Entry After Store with Data Tag Parity Error"
for more information.

The sequences are only modified if FIX_LEON3FT_B2BST is defined.

The patch works in conjunction with the -mfix-ut700, -mfix-gr712rc,
and -mfix-ut699 GCC flags, which prevent the sensitive sequences from
being generated by the compiler.

Update #3057.
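
To make the workaround concrete, the following sketch shows the intent behind the SPARC_LEON3FT_B2BST_NOP macro used throughout this file. It is an illustration only; the actual definition is provided by the SPARC-specific headers and may differ in detail. When FIX_LEON3FT_B2BST is defined the macro expands to a nop, otherwise it expands to nothing, so unaffected targets pay no cost:

    /* Sketch only -- the real definition lives in the SPARC-specific headers */
    #if defined(FIX_LEON3FT_B2BST)
      #define SPARC_LEON3FT_B2BST_NOP nop
    #else
      #define SPARC_LEON3FT_B2BST_NOP
    #endif

    /* Usage: separate a sensitive back-to-back store pair with a nop */
            std     %l0, [%o0 + L0_OFFSET]
            SPARC_LEON3FT_B2BST_NOP
            std     %l2, [%o0 + L2_OFFSET]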

  • Property mode set to 100644
File size: 31.3 KB
/*  cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2014, 2016 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 *  Ported to the ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of the respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 */

#include <rtems/asm.h>
#include <rtems/score/percpu.h>
#include <bspopts.h>

#if SPARC_HAS_FPU == 1 && defined(SPARC_USE_SAFE_FP_SUPPORT)
  #define FP_FRAME_OFFSET_FO_F1 (SPARC_MINIMUM_STACK_FRAME_SIZE + 0)
  #define FP_FRAME_OFFSET_F2_F3 (FP_FRAME_OFFSET_FO_F1 + 8)
  #define FP_FRAME_OFFSET_F4_F5 (FP_FRAME_OFFSET_F2_F3 + 8)
  #define FP_FRAME_OFFSET_F6_F7 (FP_FRAME_OFFSET_F4_F5 + 8)
  #define FP_FRAME_OFFSET_F8_F9 (FP_FRAME_OFFSET_F6_F7 + 8)
  #define FP_FRAME_OFFSET_F1O_F11 (FP_FRAME_OFFSET_F8_F9 + 8)
  #define FP_FRAME_OFFSET_F12_F13 (FP_FRAME_OFFSET_F1O_F11 + 8)
  #define FP_FRAME_OFFSET_F14_F15 (FP_FRAME_OFFSET_F12_F13 + 8)
  #define FP_FRAME_OFFSET_F16_F17 (FP_FRAME_OFFSET_F14_F15 + 8)
  #define FP_FRAME_OFFSET_F18_F19 (FP_FRAME_OFFSET_F16_F17 + 8)
  #define FP_FRAME_OFFSET_F2O_F21 (FP_FRAME_OFFSET_F18_F19 + 8)
  #define FP_FRAME_OFFSET_F22_F23 (FP_FRAME_OFFSET_F2O_F21 + 8)
  #define FP_FRAME_OFFSET_F24_F25 (FP_FRAME_OFFSET_F22_F23 + 8)
  #define FP_FRAME_OFFSET_F26_F27 (FP_FRAME_OFFSET_F24_F25 + 8)
  #define FP_FRAME_OFFSET_F28_F29 (FP_FRAME_OFFSET_F26_F27 + 8)
  #define FP_FRAME_OFFSET_F3O_F31 (FP_FRAME_OFFSET_F28_F29 + 8)
  #define FP_FRAME_OFFSET_FSR (FP_FRAME_OFFSET_F3O_F31 + 8)
  #define FP_FRAME_SIZE (FP_FRAME_OFFSET_FSR + 8)
#endif
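/*
 * Layout note: the frame defined above provides sixteen 8-byte slots for the
 * double-word register pairs %f0/%f1 through %f30/%f31 plus one 8-byte slot
 * for the %fsr, all placed on top of a minimum stack frame (the %fsr slot is
 * 8 bytes wide, presumably to keep the frame size doubleword aligned).
 */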

/*
 *  void _CPU_Context_switch(
 *    Context_Control  *run,
 *    Context_Control  *heir
 *  )
 *
 *  This routine performs a normal non-FP context switch.
 */

        .align 4
        PUBLIC(_CPU_Context_switch)
SYM(_CPU_Context_switch):
        st      %g5, [%o0 + G5_OFFSET]       ! save the global registers

        /*
         * No need to save the thread pointer %g7 since it is a thread
         * invariant.  It is initialized once in _CPU_Context_Initialize().
         */

        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
        SPARC_LEON3FT_B2BST_NOP
        std     %l2, [%o0 + L2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l4, [%o0 + L4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l6, [%o0 + L6_OFFSET]
        SPARC_LEON3FT_B2BST_NOP

        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
        SPARC_LEON3FT_B2BST_NOP
        std     %i2, [%o0 + I2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i4, [%o0 + I4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i6, [%o0 + I6_FP_OFFSET]
        SPARC_LEON3FT_B2BST_NOP

        std     %o6, [%o0 + O6_SP_OFFSET]    ! save the output registers

        ! load the ISR stack nesting prevention flag
        ld      [%g6 + PER_CPU_ISR_DISPATCH_DISABLE], %o4
        ! save it a bit later so we do not waste a couple of cycles

        rd      %psr, %o2
        st      %o2, [%o0 + PSR_OFFSET]      ! save status register

        ! Now actually save ISR stack nesting prevention flag
        st       %o4, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]

        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
         */

        PUBLIC(_CPU_Context_restore_heir)
SYM(_CPU_Context_restore_heir):
        /*
         *  Flush all windows with valid contents except the current one.
         *  In examining the set of register windows, one may logically divide
         *  the windows into sets (some of which may be empty) based on their
         *  current status:
         *
         *    + current (i.e. in use),
         *    + used (i.e. a restore would not trap)
         *    + invalid (i.e. 1 in the corresponding bit in WIM)
         *    + unused
         *
         *  Either the used or unused set of windows may be empty.
         *
         *  NOTE: We assume only one bit is set in the WIM at a time.
         *
         *  Given a CWP of 5 and a WIM of 0x1, the registers are divided
         *  into sets as follows:
         *
         *    + 0   - invalid
         *    + 1-4 - unused
         *    + 5   - current
         *    + 6-7 - used
         *
         *  In this case, we would only save the used windows -- 6 and 7.
         *
         *   Traps are disabled for the same logical period as in a
         *     flush all windows trap handler.
         *
         *    Register Usage while saving the windows:
         *      g1 = current PSR
         *      g2 = current wim
         *      g3 = CWP
         *      g4 = wim scratch
         *      g5 = scratch
         */

        and     %o2, SPARC_PSR_CWP_MASK, %g3  ! g3 = CWP
        andn    %o2, SPARC_PSR_ET_MASK, %g1   ! g1 = psr with traps disabled
        mov     %g1, %psr                     ! **** DISABLE TRAPS ****
        mov     %wim, %g2                     ! g2 = wim
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = WIM mask for CW invalid

save_frame_loop:
        sll     %g4, 1, %g5                   ! rotate the "wim" left 1
        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
        or      %g4, %g5, %g4                 ! g4 = wim if we do one restore
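        ! Example: with 8 register windows and CWP = 5, g4 enters the loop as
        ! 0x20 and the rotate above produces 0x40 (1 << (CWP + 1)); if that bit
        ! is set in the real WIM, a restore would underflow, so flushing stops.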

        /*
         *  If a restore would not underflow, then continue.
         */

        andcc   %g4, %g2, %g0                 ! Any windows to flush?
        bnz     done_flushing                 ! No, then continue
        nop

        restore                               ! back one window

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]
        SPARC_LEON3FT_B2BST_NOP

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]
        SPARC_LEON3FT_B2BST_NOP

        ba      save_frame_loop
        nop

done_flushing:

        ! Wait three instructions after the write to PSR before using
        ! non-global registers or instructions affecting the CWP
        mov     %g1, %psr                     ! restore cwp
        add     %g3, 1, %g2                   ! calculate desired WIM
        and     %g2, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g2
        mov     1, %g4
        sll     %g4, %g2, %g4                 ! g4 = new WIM
        mov     %g4, %wim
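        ! Example: with CWP = 5 (g3) and 8 windows, the new WIM is 1 << 6 =
        ! 0x40, i.e. the window adjacent to the current one is marked invalid.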

#if defined(RTEMS_SMP)
        /*
         * The executing thread no longer executes on this processor.  Switch
         * the stack to the temporary interrupt stack of this processor.  Mark
         * the context of the executing thread as not executing.
         */
        add     %g6, PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE, %sp
        st      %g0, [%o0 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]

        ! Try to update the is executing indicator of the heir context
        mov     1, %g1

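        /*
         * Rough sketch of the handshake below (illustration only): atomically
         * swap a one into the heir's is-executing flag; if the old value was
         * non-zero the heir still runs on another processor, so spin on the
         * flag and re-read the per-CPU heir pointer while waiting, since a
         * new heir may have been selected in the meantime, then retry.
         */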
.Ltry_update_is_executing:

        swap    [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
        cmp     %g1, 0
        bne     .Lcheck_is_executing

        ! The next load is in a delay slot, which is all right
#endif

        ld      [%o1 + PSR_OFFSET], %g1       ! g1 = heir psr with traps enabled
        andn    %g1, SPARC_PSR_CWP_MASK, %g1  ! g1 = heir psr w/o cwp
        or      %g1, %g3, %g1                 ! g1 = heir psr with cwp
        mov     %g1, %psr                     ! restore status register and
                                              ! **** ENABLE TRAPS ****

        ld      [%o1 + G5_OFFSET], %g5        ! restore the global registers
        ld      [%o1 + G7_OFFSET], %g7

        ! Load thread specific ISR dispatch prevention flag
        ld      [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
        ! Store it to memory later to use the cycles

        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
        ldd     [%o1 + L2_OFFSET], %l2
        ldd     [%o1 + L4_OFFSET], %l4
        ldd     [%o1 + L6_OFFSET], %l6

        ! Now restore thread specific ISR dispatch prevention flag
        st      %o2, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE]

        ldd     [%o1 + I0_OFFSET], %i0        ! restore the input registers
        ldd     [%o1 + I2_OFFSET], %i2
        ldd     [%o1 + I4_OFFSET], %i4
        ldd     [%o1 + I6_FP_OFFSET], %i6

        ldd     [%o1 + O6_SP_OFFSET], %o6     ! restore the output registers

        jmp     %o7 + 8                       ! return
        nop                                   ! delay slot

#if defined(RTEMS_SMP)
.Lcheck_is_executing:

        ! Check the is executing indicator of the heir context
        ld      [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
        cmp     %g1, 0
        beq     .Ltry_update_is_executing
         mov    1, %g1

        ! We may have a new heir

        ! Read the executing and heir
        ld      [%g6 + PER_CPU_OFFSET_EXECUTING], %g2
        ld      [%g6 + PER_CPU_OFFSET_HEIR], %g4

        ! Update the executing only if necessary to avoid cache line
        ! monopolization.
        cmp     %g2, %g4
        beq     .Ltry_update_is_executing
         mov    1, %g1

        ! Calculate the heir context pointer
        sub     %o1, %g2, %g2
        add     %g2, %g4, %o1

        ! Update the executing
        st      %g4, [%g6 + PER_CPU_OFFSET_EXECUTING]

        ba      .Ltry_update_is_executing
         mov    1, %g1
#endif

/*
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  )
 *
 *  This routine is generally used only to perform a restart of self.
 *
 *  NOTE: It is unnecessary to reload some registers.
 */
        .align 4
        PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_restore):
        save    %sp, -SPARC_MINIMUM_STACK_FRAME_SIZE, %sp
        rd      %psr, %o2
#if defined(RTEMS_SMP)
        ! On SPARC with SMP the restore path also needs a valid executing
        ! context to update the is executing indicator.
        mov     %i0, %o0
#endif
        ba      SYM(_CPU_Context_restore_heir)
        mov     %i0, %o1                      ! in the delay slot

/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, trap type is between 0 and 255 if
 *        it is an asynchronous trap and between 256 and 511 if it is synchronous.
 */

        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                      ! Is this a synchronous trap?
        be,a    win_ovflow            ! No, then skip the adjustment
        nop                           ! DELAY
        mov     %l1, %l6              ! save trapped pc for debug info
        mov     %l2, %l1              ! do not return to the instruction
        add     %l2, 4, %l2           ! indicated

win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                 ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5            ! g5 = win >> cwp ; shift count and CWP
                                         !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                   ! Is this an invalid window?
        bne     dont_do_the_window       ! No, then skip all this stuff
        ! we are using the delay slot

        /*
         *  The following is the same as a 1 position right rotate of WIM
         */

        srl     %g4, 1, %g5              ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
                                         ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
                                         !      (WIM << (Number Windows - 1))

        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]
        SPARC_LEON3FT_B2BST_NOP

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved from %l4 and
         *  %l5 directly into the ISF below.
         */

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CPU_INTERRUPT_FRAME_SIZE, %sp
                                               ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        SPARC_LEON3FT_B2BST_NOP
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        SPARC_LEON3FT_B2BST_NOP
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        SPARC_LEON3FT_B2BST_NOP
        st      %g7, [%sp + ISF_G7_OFFSET]     ! save g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        SPARC_LEON3FT_B2BST_NOP
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        SPARC_LEON3FT_B2BST_NOP
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        SPARC_LEON3FT_B2BST_NOP
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc

        mov     %sp, %o1                       ! 2nd arg to ISR Handler

        /*
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l6 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */

        ld       [%g6 + PER_CPU_ISR_NEST_LEVEL], %l7
        ld       [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL], %l6

        add      %l7, 1, %l7
        st       %l7, [%g6 + PER_CPU_ISR_NEST_LEVEL]
        SPARC_LEON3FT_B2BST_NOP

        add      %l6, 1, %l6
        st       %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

#if SPARC_HAS_FPU == 1
        /*
         * We cannot use an immediate value for operations with the PSR[EF]
         * bit since they use a 13-bit sign extension and PSR[EF] is bit 12.
         */
        sethi    %hi(SPARC_PSR_EF_MASK), %l5
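        /*
         * SPARC_PSR_EF_MASK is 1 << 12 = 0x1000, which lies outside the
         * simm13 immediate range of -4096..4095, hence the sethi above.
         */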
#endif

        /*
         *  If the ISR nest level was zero (now 1), then switch stacks.
         */

        mov      %sp, %fp
        subcc    %l7, 1, %l7             ! outermost interrupt handler?
        bnz      dont_switch_stacks      ! No, then do not switch stacks

#if defined(RTEMS_PROFILING)
         sethi   %hi(_SPARC_Counter), %o5
        ld       [%o5 + %lo(_SPARC_Counter)], %l4
        call     %l4
         nop
        mov      %o0, %o5
#else
         nop
#endif

        ld       [%g6 + PER_CPU_INTERRUPT_STACK_HIGH], %sp

#if SPARC_HAS_FPU == 1
        /*
         * Test if the interrupted thread uses the floating point unit
         * (PSR[EF] == 1).  If it does, store the floating point status
         * register.  This has the side-effect that all pending floating point
         * operations complete before the store completes.  The PSR[EF] bit is
         * restored after the call to the interrupt handler.  Thus post-switch
         * actions (e.g. signal handlers) and context switch extensions may
         * still corrupt the floating point context.
         */
        andcc    %l0, %l5, %g0
        bne,a    dont_switch_stacks
         st      %fsr, [%g6 + SPARC_PER_CPU_FSR_OFFSET]
#endif

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
         *  set the PIL in the %psr to mask off interrupts of the same or lower
         *  priority.  The original %psr in %l0 is not modified since it will
         *  be restored when the interrupt handler returns.
         */

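        /*
         * For example, external interrupt trap type 0x11 (interrupt level 1)
         * yields (0x11 << 8) & SPARC_PSR_PIL_MASK = 0x100 below, i.e. PIL = 1,
         * and trap type 0x1f yields PIL = 15.
         */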
        mov      %l0, %g5
        and      %l3, 0x0ff, %g4
        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil
        sll      %g4, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        ba       pil_fixed
        or       %g4, %g5, %g5
dont_fix_pil:
        or       %g5, SPARC_PSR_PIL_MASK, %g5
pil_fixed:

#if SPARC_HAS_FPU == 1
        /*
         * Clear the PSR[EF] bit of the interrupted context to ensure that
         * interrupt service routines cannot corrupt the floating point context.
         */
        andn     %g5, %l5, %g5
#endif

        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****

        /*
         *  Vector to the user's handler.
         *
         *  NOTE: TBR may no longer have the vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        or       %g4, %lo(SYM(_ISR_Vector_table)), %g4
        and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
        sll      %g5, 2, %g5            ! g5 = offset into table
        ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]


                                        ! o1 = 2nd arg = address of the ISF
                                        !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0               ! o0 = 1st arg = vector number
        call     %g4
#if defined(RTEMS_PROFILING)
         mov     %o5, %l3               ! save interrupt entry instant
#else
         nop                            ! delay slot
#endif

#if SPARC_HAS_FPU == 1
        mov      %l0, %g1               ! PSR[EF] value of interrupted context
        ta       SPARC_SWTRAP_IRQDIS_FP ! **** DISABLE INTERRUPTS ****
#else
        ta       SPARC_SWTRAP_IRQDIS    ! **** DISABLE INTERRUPTS ****
#endif

#if defined(RTEMS_PROFILING)
        cmp      %l7, 0
        bne      profiling_not_outer_most_exit
         nop
        call     %l4                    ! Call _SPARC_Counter.counter_read
         mov     %g1, %l4               ! Save previous interrupt status
        mov      %o0, %o2               ! o2 = 3rd arg = interrupt exit instant
        mov      %l3, %o1               ! o1 = 2nd arg = interrupt entry instant
        call     SYM(_Profiling_Outer_most_interrupt_entry_and_exit)
         mov     %g6, %o0               ! o0 = 1st arg = per-CPU control
profiling_not_outer_most_exit:
#endif

        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    o2 = g6->dispatch_necessary value
         *    o3 = g6->isr_dispatch_disable value
         *    l6 = g6->thread_dispatch_disable_level value
         *    l7 = g6->isr_nest_level value
         */

        ldub     [%g6 + PER_CPU_DISPATCH_NEEDED], %o2
        ld       [%g6 + PER_CPU_ISR_DISPATCH_DISABLE], %o3
        st       %l7, [%g6 + PER_CPU_ISR_NEST_LEVEL]
        SPARC_LEON3FT_B2BST_NOP
        sub      %l6, 1, %l6
        st       %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

        /*
         * Thread dispatching is necessary and allowed if and only if
         *   g6->dispatch_necessary == 1 and
         *   g6->isr_dispatch_disable == 0 and
         *   g6->thread_dispatch_disable_level == 0.
         *
         * Otherwise, continue with the simple return.
         */
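        /*
         * For example, with dispatch_necessary == 1, isr_dispatch_disable == 0,
         * and thread_dispatch_disable_level == 0, the expression computed
         * below, (o2 xor 1) | l6 | o3, is zero, so the branch to simple_return
         * is not taken and the dispatcher is invoked.
         */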
        xor      %o2, 1, %o2
        or       %o2, %l6, %o2
        orcc     %o2, %o3, %o2
        bnz      simple_return

        /*
         * Switch back to the interrupted task's stack and add enough room to
         * invoke the dispatcher.  Doing this in the delay slot causes no harm,
         * since the stack pointer (%sp) is not used in the simple return path.
         */
         sub     %fp, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp

isr_dispatch:

        /* Set ISR dispatch disable and thread dispatch disable level to one */
        mov      1, %l6
        st       %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        st       %l6, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE]

        /* Call _Thread_Do_dispatch(); this function will enable interrupts */

        mov      0, %o1                 ! ISR level for _Thread_Do_dispatch()

#if SPARC_HAS_FPU == 1 && defined(SPARC_USE_SAFE_FP_SUPPORT)
        /* Test if we interrupted a floating point thread (PSR[EF] == 1) */
        andcc   %l0, %l5, %g0
        be      non_fp_thread_dispatch
         nop

        /*
         * Yes, this is a floating point thread, so save the floating point
         * context to a new stack frame.  Then do the thread dispatch.
         * Post-switch actions (e.g. signal handlers) and context switch
         * extensions may safely use the floating point unit.
         */
        sub     %sp, FP_FRAME_SIZE, %sp
        std     %f0, [%sp + FP_FRAME_OFFSET_FO_F1]
        SPARC_LEON3FT_B2BST_NOP
        std     %f2, [%sp + FP_FRAME_OFFSET_F2_F3]
        SPARC_LEON3FT_B2BST_NOP
        std     %f4, [%sp + FP_FRAME_OFFSET_F4_F5]
        SPARC_LEON3FT_B2BST_NOP
        std     %f6, [%sp + FP_FRAME_OFFSET_F6_F7]
        SPARC_LEON3FT_B2BST_NOP
        std     %f8, [%sp + FP_FRAME_OFFSET_F8_F9]
        SPARC_LEON3FT_B2BST_NOP
        std     %f10, [%sp + FP_FRAME_OFFSET_F1O_F11]
        SPARC_LEON3FT_B2BST_NOP
        std     %f12, [%sp + FP_FRAME_OFFSET_F12_F13]
        SPARC_LEON3FT_B2BST_NOP
        std     %f14, [%sp + FP_FRAME_OFFSET_F14_F15]
        SPARC_LEON3FT_B2BST_NOP
        std     %f16, [%sp + FP_FRAME_OFFSET_F16_F17]
        SPARC_LEON3FT_B2BST_NOP
        std     %f18, [%sp + FP_FRAME_OFFSET_F18_F19]
        SPARC_LEON3FT_B2BST_NOP
        std     %f20, [%sp + FP_FRAME_OFFSET_F2O_F21]
        SPARC_LEON3FT_B2BST_NOP
        std     %f22, [%sp + FP_FRAME_OFFSET_F22_F23]
        SPARC_LEON3FT_B2BST_NOP
        std     %f24, [%sp + FP_FRAME_OFFSET_F24_F25]
        SPARC_LEON3FT_B2BST_NOP
        std     %f26, [%sp + FP_FRAME_OFFSET_F26_F27]
        SPARC_LEON3FT_B2BST_NOP
        std     %f28, [%sp + FP_FRAME_OFFSET_F28_F29]
        SPARC_LEON3FT_B2BST_NOP
        std     %f30, [%sp + FP_FRAME_OFFSET_F3O_F31]
        SPARC_LEON3FT_B2BST_NOP
        st      %fsr, [%sp + FP_FRAME_OFFSET_FSR]
        call    SYM(_Thread_Do_dispatch)
         mov    %g6, %o0

        /*
         * Restore the floating point context from the stack frame and release
         * the stack frame.
         */
        ldd     [%sp + FP_FRAME_OFFSET_FO_F1], %f0
        ldd     [%sp + FP_FRAME_OFFSET_F2_F3], %f2
        ldd     [%sp + FP_FRAME_OFFSET_F4_F5], %f4
        ldd     [%sp + FP_FRAME_OFFSET_F6_F7], %f6
        ldd     [%sp + FP_FRAME_OFFSET_F8_F9], %f8
        ldd     [%sp + FP_FRAME_OFFSET_F1O_F11], %f10
        ldd     [%sp + FP_FRAME_OFFSET_F12_F13], %f12
        ldd     [%sp + FP_FRAME_OFFSET_F14_F15], %f14
        ldd     [%sp + FP_FRAME_OFFSET_F16_F17], %f16
        ldd     [%sp + FP_FRAME_OFFSET_F18_F19], %f18
        ldd     [%sp + FP_FRAME_OFFSET_F2O_F21], %f20
        ldd     [%sp + FP_FRAME_OFFSET_F22_F23], %f22
        ldd     [%sp + FP_FRAME_OFFSET_F24_F25], %f24
        ldd     [%sp + FP_FRAME_OFFSET_F26_F27], %f26
        ldd     [%sp + FP_FRAME_OFFSET_F28_F29], %f28
        ldd     [%sp + FP_FRAME_OFFSET_F3O_F31], %f30
        ld      [%sp + FP_FRAME_OFFSET_FSR], %fsr
        ba      thread_dispatch_done
         add    %sp, FP_FRAME_SIZE, %sp

non_fp_thread_dispatch:
#endif

        call    SYM(_Thread_Do_dispatch)
         mov    %g6, %o0

#if SPARC_HAS_FPU == 1 && defined(SPARC_USE_SAFE_FP_SUPPORT)
thread_dispatch_done:
#endif

        ta       SPARC_SWTRAP_IRQDIS ! **** DISABLE INTERRUPTS ****

        /*
         *  While we had ISR dispatching disabled in this thread,
         *  did we miss anything?  If so, then we need to do another
         *  _Thread_Do_dispatch() before leaving this ISR dispatch context.
         */
        ldub    [%g6 + PER_CPU_DISPATCH_NEEDED], %l7

        orcc    %l7, %g0, %g0        ! Is a thread dispatch necessary?
        bne     isr_dispatch         ! Yes, then invoke the dispatcher again.
         mov    0, %o1               ! ISR level for _Thread_Do_dispatch()

        /*
         * No, then set the ISR dispatch disable flag to zero and continue with
         * the simple return.
         */
        st       %g0, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE]

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0

        /*
         *  Restore the task's global and out registers
         */

        mov    %fp, %g1

                                              ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
        ld      [%fp + ISF_G7_OFFSET], %g7    ! restore g7

        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and are thus off limits.
         *
         *   The following are the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  If (CWP + 1) is an invalid window, then we need to reload it.
         *
         *  WARNING: Traps should now be disabled
         */

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = win >> cwp + 1 ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is the task's window invalid?
        bne     good_task_window

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the task's window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! reload of sp clobbers ISF
        save                               ! Back to ISR dispatch window

good_task_window:

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop; nop; nop
                                           !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        jmp     %l1                        ! transfer control and
        rett    %l2                        ! go back to the task's window
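                                           ! (rett re-enables traps by setting
                                           ! PSR[ET] and increments the CWP
                                           ! back into the task's window)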

/* end of file */