source: rtems/cpukit/score/cpu/sparc/cpu_asm.S

Last change on this file was b6dc4b4, checked in by Sebastian Huber <sebastian.huber@…>, on 09/14/23 at 14:08:53

sparc: Move CPU counter implementation

Enable a BSP-specific CPU counter implementation.

Update #4954.

/* SPDX-License-Identifier: BSD-2-Clause */

/*  cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2011.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (C) 2014, 2017 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 */

#include <rtems/asm.h>
#include <rtems/score/percpu.h>
#include <libcpu/grlib-tn-0018.h>

/*
 *  void _CPU_Context_switch(
 *    Context_Control  *run,
 *    Context_Control  *heir
 *  )
 *
 *  This routine performs a normal non-FP context switch.
 */

        .align 4
        PUBLIC(_CPU_Context_switch)
        PUBLIC(_CPU_Context_switch_no_return)
SYM(_CPU_Context_switch):
SYM(_CPU_Context_switch_no_return):
        st      %g5, [%o0 + G5_OFFSET]       ! save the global registers

        /*
         * No need to save the thread pointer %g7 since it is a thread
         * invariant.  It is initialized once in _CPU_Context_Initialize().
         */

        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
        SPARC_LEON3FT_B2BST_NOP
        std     %l2, [%o0 + L2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l4, [%o0 + L4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l6, [%o0 + L6_OFFSET]
        SPARC_LEON3FT_B2BST_NOP

        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
        SPARC_LEON3FT_B2BST_NOP
        std     %i2, [%o0 + I2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i4, [%o0 + I4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i6, [%o0 + I6_FP_OFFSET]
        SPARC_LEON3FT_B2BST_NOP

        std     %o6, [%o0 + O6_SP_OFFSET]    ! save the output registers

        ! load the ISR stack nesting prevention flag
        ld      [%g6 + PER_CPU_ISR_DISPATCH_DISABLE], %o4
        ! save it a bit later so we do not waste a couple of cycles

        rd      %psr, %o2
        st      %o2, [%o0 + PSR_OFFSET]      ! save status register

        ! Now actually save ISR stack nesting prevention flag
        st       %o4, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]

        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
         */

        PUBLIC(_CPU_Context_restore_heir)
SYM(_CPU_Context_restore_heir):
        /*
         *  Flush all windows with valid contents except the current one.
         *  In examining the set of register windows, one may logically divide
         *  the windows into sets (some of which may be empty) based on their
         *  current status:
         *
         *    + current (i.e. in use),
         *    + used (i.e. a restore would not trap)
         *    + invalid (i.e. 1 in corresponding bit in WIM)
         *    + unused
         *
         *  Either the used or unused set of windows may be empty.
         *
         *  NOTE: We assume only one bit is set in the WIM at a time.
         *
         *  Given a CWP of 5 and a WIM of 0x1, the registers are divided
         *  into sets as follows:
         *
         *    + 0   - invalid
         *    + 1-4 - unused
         *    + 5   - current
         *    + 6-7 - used
         *
         *  In this case, we would only save the used windows -- 6 and 7.
         *
         *   Traps are disabled for the same logical period as in a
         *     flush all windows trap handler.
         *
         *    Register Usage while saving the windows:
         *      g1 = current PSR
         *      g2 = current wim
         *      g3 = CWP
         *      g4 = wim scratch
         *      g5 = scratch
         */

        and     %o2, SPARC_PSR_CWP_MASK, %g3  ! g3 = CWP
        andn    %o2, SPARC_PSR_ET_MASK, %g1   ! g1 = psr with traps disabled
        mov     %g1, %psr                     ! **** DISABLE TRAPS ****
        mov     %wim, %g2                     ! g2 = wim
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = WIM mask for CW invalid

save_frame_loop:
        sll     %g4, 1, %g5                   ! rotate the "wim" left 1
        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
        or      %g4, %g5, %g4                 ! g4 = wim if we do one restore
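        /*
         *  For illustration, assuming 8 register windows: starting from
         *  CWP = 5, g4 is 0x20 and one rotate left yields 0x40, the mask of
         *  the window a single restore would move into.  With WIM = 0x1 the
         *  loop therefore saves windows 6 and 7 and stops once the rotated
         *  mask reaches the WIM bit.
         */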

        /*
         *  If a restore would not underflow, then continue.
         */

        andcc   %g4, %g2, %g0                 ! Any windows to flush?
        bnz     done_flushing                 ! No, then continue
        nop

        restore                               ! back one window

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]
        SPARC_LEON3FT_B2BST_NOP

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]
        SPARC_LEON3FT_B2BST_NOP

        ba      save_frame_loop
        nop

done_flushing:

        ! Wait three instructions after the write to PSR before using
        ! non-global registers or instructions affecting the CWP
        mov     %g1, %psr                     ! restore cwp
        add     %g3, 1, %g2                   ! calculate desired WIM
        and     %g2, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g2
        mov     1, %g4
        sll     %g4, %g2, %g4                 ! g4 = new WIM
        mov     %g4, %wim
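        /*
         *  For illustration, assuming 8 register windows: with CWP = 5 the
         *  new WIM is 1 << 6 = 0x40, i.e. the window at CWP + 1 is the only
         *  invalid one.  This matches the flush above: the contents of the
         *  used windows are on the stack and are reloaded on demand by the
         *  window underflow trap handler.
         */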

#if defined(RTEMS_SMP)
        /*
         * The executing thread no longer executes on this processor.  Switch
         * the stack to the temporary interrupt stack of this processor.  Mark
         * the context of the executing thread as not executing.
         */
        add     %g6, PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE, %sp
        st      %g0, [%o0 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]

        ! Try to update the is executing indicator of the heir context
        mov     1, %g1

#if defined(__FIX_LEON3FT_B2BST)
        /*
         * This is a workaround for GRLIB-TN-0011 (Technical Note on LEON3/FT
         * AHB Lock Release During Atomic Operation).  Affected components are
         * the GR712RC, UT699, UT699E, UT700, and LEON3FT-RTAX.  Strictly, the
         * workaround is only necessary if the MMU is enabled.  Using
         * __FIX_LEON3FT_B2BST is not 100% appropriate, but it is the best
         * option available to enable the workaround.  The alignment padding
         * is filled with nops.
         */
.align 16
#endif
.Ltry_update_is_executing:

        swap    [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
        cmp     %g1, 0
        bne     .Lcheck_is_executing

        ! The next load is in a delay slot, which is all right
#endif

#if defined(SPARC_USE_LAZY_FP_SWITCH)
        ld      [%g6 + SPARC_PER_CPU_FP_OWNER_OFFSET], %g2
#endif
        ld      [%o1 + PSR_OFFSET], %g1       ! g1 = heir psr with traps enabled
#if defined(SPARC_USE_LAZY_FP_SWITCH)
        sethi   %hi(SPARC_PSR_EF_MASK), %g5
        cmp     %g2, %g0
        bne,a   .Lclear_psr_ef_done
         andn   %g1, %g5, %g1                 ! g1 = heir psr w/o PSR[EF]
.Lclear_psr_ef_done:
#endif
        andn    %g1, SPARC_PSR_CWP_MASK, %g1  ! g1 = heir psr w/o cwp
        or      %g1, %g3, %g1                 ! g1 = heir psr with cwp
        mov     %g1, %psr                     ! restore status register and
                                              ! **** ENABLE TRAPS ****

        /*
         * WARNING: This code does not run with the restored stack pointer.  In
         * SMP configurations, it uses a processor-specific stack.  In
         * uniprocessor configurations, it uses the stack of the caller.  In
         * this case, the caller shall ensure that it is not the interrupt
         * stack (which is also the system initialization stack).
         */

        ld      [%o1 + G5_OFFSET], %g5        ! restore the global registers
        ld      [%o1 + G7_OFFSET], %g7

        ! Load thread specific ISR dispatch prevention flag
        ld      [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
        ! Store it to memory later to use the cycles

        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
        ldd     [%o1 + L2_OFFSET], %l2
        ldd     [%o1 + L4_OFFSET], %l4
        ldd     [%o1 + L6_OFFSET], %l6

        ! Now restore thread specific ISR dispatch prevention flag
        st      %o2, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE]

        ldd     [%o1 + I0_OFFSET], %i0        ! restore the input registers
        ldd     [%o1 + I2_OFFSET], %i2
        ldd     [%o1 + I4_OFFSET], %i4
        ldd     [%o1 + I6_FP_OFFSET], %i6

        ldd     [%o1 + O6_SP_OFFSET], %o6     ! restore the non-volatile output
                                              ! registers (stack pointer,
                                              ! link register)

        jmp     %o7 + 8                       ! return
        nop                                   ! delay slot

#if defined(RTEMS_SMP)
.Lcheck_is_executing:

        ! Check the is executing indicator of the heir context
        ld      [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
        cmp     %g1, 0
        beq     .Ltry_update_is_executing
         mov    1, %g1

        ! We may have a new heir

        ! Read the executing and heir
        ld      [%g6 + PER_CPU_OFFSET_EXECUTING], %g2
        ld      [%g6 + PER_CPU_OFFSET_HEIR], %g4

        ! Update the executing only if necessary to avoid cache line
        ! monopolization.
        cmp     %g2, %g4
        beq     .Ltry_update_is_executing
         mov    1, %g1

        ! Calculate the heir context pointer
        sub     %o1, %g2, %g2
        add     %g2, %g4, %o1
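        /*
         *  The two instructions above compute
         *    new o1 = (old o1 - executing TCB) + heir TCB.
         *  This relies on the register context being located at the same
         *  offset within every thread control block.
         */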

        ! Update the executing
        st      %g4, [%g6 + PER_CPU_OFFSET_EXECUTING]

        ba      .Ltry_update_is_executing
         mov    1, %g1
#endif

/*
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  )
 *
 *  This routine is generally used only to restart the calling thread
 *  (restart self).
 *
 *  NOTE: It is unnecessary to reload some registers.
 */
        .align 4
        PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_restore):
        save    %sp, -SPARC_MINIMUM_STACK_FRAME_SIZE, %sp
        rd      %psr, %o2
#if defined(RTEMS_SMP)
        ! On SPARC, the SMP restore path also needs a valid executing context
        ! to update the is executing indicator.
        mov     %i0, %o0
#endif
        ba      SYM(_CPU_Context_restore_heir)
        mov     %i0, %o1                      ! in the delay slot

#if !defined(RTEMS_SMP)
        .align 4
        PUBLIC(_SPARC_Start_multitasking)
SYM(_SPARC_Start_multitasking):
        /*
         * Restore the stack pointer right now, so that the window flushing and
         * interrupts during _CPU_Context_restore_heir() use the stack of the
         * heir thread.  This is crucial for the interrupt handling to prevent
         * a concurrent use of the interrupt stack (which is also the system
         * initialization stack).
         */
        ld      [%o0 + O6_SP_OFFSET], %o6

        ba      SYM(_CPU_Context_restore)
         nop
#endif

/*
 *  void _SPARC_Interrupt_trap()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = interrupt vector number (this is not the trap type)
 *
 *  NOTE: This trap handler is intended to service external interrupts.
 */

        .align 4
        PUBLIC(_SPARC_Interrupt_trap)
SYM(_SPARC_Interrupt_trap):
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                 ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5            ! g5 = wim >> cwp ; shift count and CWP
                                         !   are LS 5 bits ; how convenient :)
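        /*
         *  For illustration, assuming 8 register windows: with WIM = 0x20 and
         *  a CWP of 5 in %l0, the shift yields 1, i.e. the WIM bit of the
         *  current window is set and a window overflow has to be processed.
         */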
        cmp     %g5, 1                   ! Is this an invalid window?
        bne     dont_do_the_window       ! No, then skip all this stuff
        ! we are using the delay slot

        /*
         *  The following is the same as a 1 position right rotate of WIM
         */

        srl     %g4, 1, %g5              ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
                                         ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
                                         !      (WIM << (Number Windows - 1))
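        /*
         *  For illustration, assuming 8 register windows: WIM = 0x20 rotates
         *  right to 0x10, while WIM = 0x01 wraps around to 0x80.
         */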

        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]
        SPARC_LEON3FT_B2BST_NOP

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        SPARC_LEON3FT_B2BST_NOP
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved from %l4 and %l5 directly
         *  into the ISF below.
         */

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _SPARC_Interrupt_trap()
         */

        sub     %fp, CPU_INTERRUPT_FRAME_SIZE, %sp
                                               ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        SPARC_LEON3FT_B2BST_NOP
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        SPARC_LEON3FT_B2BST_NOP
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        SPARC_LEON3FT_B2BST_NOP
        st      %g7, [%sp + ISF_G7_OFFSET]     ! save g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        SPARC_LEON3FT_B2BST_NOP
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        SPARC_LEON3FT_B2BST_NOP
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        SPARC_LEON3FT_B2BST_NOP
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y

        /*
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l6 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */

        ld       [%g6 + PER_CPU_ISR_NEST_LEVEL], %l7
        ld       [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL], %l6

        add      %l7, 1, %l7
        st       %l7, [%g6 + PER_CPU_ISR_NEST_LEVEL]
        SPARC_LEON3FT_B2BST_NOP

        add      %l6, 1, %l6
        st       %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

#if SPARC_HAS_FPU == 1
        /*
         * We cannot use an intermediate value for operations with the PSR[EF]
         * bit since they use a 13-bit sign extension and PSR[EF] is bit 12.
         */
        sethi    %hi(SPARC_PSR_EF_MASK), %l5
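        /*
         *  For reference: PSR[EF] is bit 12, so SPARC_PSR_EF_MASK is 0x1000,
         *  which lies outside the signed 13-bit immediate range (-4096 to
         *  4095) and therefore has to be built with sethi.
         */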
#endif

        /*
         *  If ISR nest level was zero (now 1), then switch stack.
         */

        mov      %sp, %fp
        subcc    %l7, 1, %l7             ! outermost interrupt handler?
        bnz      dont_switch_stacks      ! No, then do not switch stacks

#if defined(RTEMS_PROFILING)
        call     SYM(_SPARC_Counter_read_ISR_disabled)
         nop
        mov      %o0, %o5
#else
         nop
#endif

        ld       [%g6 + PER_CPU_INTERRUPT_STACK_HIGH], %sp

#if SPARC_HAS_FPU == 1
        /*
         * Test if the interrupted thread uses the floating point unit
         * (PSR[EF] == 1).  In case it uses the floating point unit, then store
         * the floating point status register.  This has the side-effect that
         * all pending floating point operations complete before the store
         * completes.  The PSR[EF] bit is restored after the call to the
         * interrupt handler.  Thus post-switch actions (e.g. signal handlers)
         * and context switch extensions may still corrupt the floating point
         * context.
         */
        andcc    %l0, %l5, %g0
        beq      dont_switch_stacks
         nop
        st      %fsr, [%g6 + SPARC_PER_CPU_FSR_OFFSET]
#endif

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Set the PIL in the %psr to mask off interrupts with lower priority.
         *  The original %psr in %l0 is not modified since it will be restored
         *  when the interrupt handler returns.
         */

        mov      %l0, %g5
        sll      %l3, 8, %g4
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        or       %g4, %g5, %g5
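        /*
         *  For illustration: the PIL field occupies PSR bits 8 to 11, so for
         *  interrupt vector 5 the shift yields 0x500 and the new PSR carries
         *  PIL = 5, which keeps interrupt levels 1 to 5 masked while the
         *  handler runs.
         */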

#if SPARC_HAS_FPU == 1
        /*
         * Clear the PSR[EF] bit of the interrupted context to ensure that
         * interrupt service routines cannot corrupt the floating point context.
         */
        andn     %g5, %l5, %g5
#endif

        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****

        /*
         *  Call _SPARC_Interrupt_dispatch( %l3 )
         */
        mov      %l3, %o0               ! o0 = 1st arg = vector number
        call     SYM(_SPARC_Interrupt_dispatch)
#if defined(RTEMS_PROFILING)
         mov     %o5, %l3               ! save interrupt entry instant
#else
         nop                            ! delay slot
#endif

#if defined(SPARC_USE_SYNCHRONOUS_FP_SWITCH)
        mov      %l0, %g1               ! PSR[EF] value of interrupted context
        ta       SPARC_SWTRAP_IRQDIS_FP ! **** DISABLE INTERRUPTS ****
#else
        ta       SPARC_SWTRAP_IRQDIS    ! **** DISABLE INTERRUPTS ****
#endif

#if defined(RTEMS_PROFILING)
        cmp      %l7, 0
        bne      profiling_not_outer_most_exit
         nop
        call     SYM(_SPARC_Counter_read_ISR_disabled)
         mov     %g1, %l4               ! Save previous interrupt status
        mov      %o0, %o2               ! o2 = 3rd arg = interrupt exit instant
        mov      %l3, %o1               ! o1 = 2nd arg = interrupt entry instant
        call     SYM(_Profiling_Outer_most_interrupt_entry_and_exit)
         mov     %g6, %o0               ! o0 = 1st arg = per-CPU control
profiling_not_outer_most_exit:
#endif

        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    o2 = g6->dispatch_necessary value
         *    o3 = g6->isr_dispatch_disable value
         *    l6 = g6->thread_dispatch_disable_level value
         *    l7 = g6->isr_nest_level value
         */

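        /*
         *  Note that %l7 already holds the decremented ISR nest level: the
         *  subcc used for the outermost-handler test above subtracted one,
         *  so the store below completes the decrement without another sub.
         */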
        ldub     [%g6 + PER_CPU_DISPATCH_NEEDED], %o2
        ld       [%g6 + PER_CPU_ISR_DISPATCH_DISABLE], %o3
        st       %l7, [%g6 + PER_CPU_ISR_NEST_LEVEL]
        SPARC_LEON3FT_B2BST_NOP
        sub      %l6, 1, %l6
        st       %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

        /*
         * Thread dispatching is necessary and allowed if and only if
         *   g6->dispatch_necessary == 1 and
         *   g6->isr_dispatch_disable == 0 and
         *   g6->thread_dispatch_disable_level == 0.
         *
         * Otherwise, continue with the simple return.
         */
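        /*
         *  The check below folds the three conditions into a single zero
         *  test: the xor turns dispatch_necessary == 1 into 0, and or-ing in
         *  the two disable values leaves zero exactly when all three
         *  conditions hold.
         */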
        xor      %o2, 1, %o2
        or       %o2, %l6, %o2
        orcc     %o2, %o3, %o2
        bnz      simple_return

        /*
         * Switch back to the interrupted task's stack and add enough room to
         * invoke the dispatcher.  Doing this in the delay slot causes no harm,
         * since the stack pointer (%sp) is not used in the simple return path.
         */
         sub     %fp, SPARC_MINIMUM_STACK_FRAME_SIZE, %sp

isr_dispatch:

        /* Set ISR dispatch disable and thread dispatch disable level to one */
        mov      1, %l6
        st       %l6, [%g6 + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        st       %l6, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE]

        /* Call _Thread_Do_dispatch(); this function will enable interrupts */

        mov      0, %o1                 ! ISR level for _Thread_Do_dispatch()

#if defined(SPARC_USE_LAZY_FP_SWITCH)
        /* Test if we interrupted a floating point thread (PSR[EF] == 1) */
        andcc   %l0, %l5, %g0
        be      .Lnon_fp_thread_dispatch
         ld     [%g6 + PER_CPU_OFFSET_EXECUTING], %l6

        /* Set new floating point unit owner to executing thread */
        st      %l6, [%g6 + SPARC_PER_CPU_FP_OWNER_OFFSET]

        call    SYM(_Thread_Do_dispatch)
         mov    %g6, %o0

        /*
         * If we are still the floating point unit owner, then reset the
         * floating point unit owner to NULL, otherwise clear PSR[EF] in the
         * interrupt frame and let the FP disabled system call do the floating
         * point context save/restore.
         */
        ld      [%g6 + SPARC_PER_CPU_FP_OWNER_OFFSET], %l7
        cmp     %l6, %l7
        bne,a   .Ldisable_fp
         andn   %l0, %l5, %l0
        st      %g0, [%g6 + SPARC_PER_CPU_FP_OWNER_OFFSET]
        ba      .Lthread_dispatch_done
         nop
.Ldisable_fp:
        st       %l0, [%fp + ISF_PSR_OFFSET]
        ba      .Lthread_dispatch_done
         nop
.Lnon_fp_thread_dispatch:
#elif defined(SPARC_USE_SYNCHRONOUS_FP_SWITCH)
        /* Test if we interrupted a floating point thread (PSR[EF] == 1) */
        andcc   %l0, %l5, %g0
        be      .Lnon_fp_thread_dispatch
         nop

        /*
         * Yes, this is a floating point thread, so save the floating point
         * context to a new stack frame.  Then do the thread dispatch.
         * Post-switch actions (e.g. signal handlers) and context switch
         * extensions may safely use the floating point unit.
         */
        sub     %sp, SPARC_FP_FRAME_SIZE, %sp
        std     %f0, [%sp + SPARC_FP_FRAME_OFFSET_FO_F1]
        SPARC_LEON3FT_B2BST_NOP
        std     %f2, [%sp + SPARC_FP_FRAME_OFFSET_F2_F3]
        SPARC_LEON3FT_B2BST_NOP
        std     %f4, [%sp + SPARC_FP_FRAME_OFFSET_F4_F5]
        SPARC_LEON3FT_B2BST_NOP
        std     %f6, [%sp + SPARC_FP_FRAME_OFFSET_F6_F7]
        SPARC_LEON3FT_B2BST_NOP
        std     %f8, [%sp + SPARC_FP_FRAME_OFFSET_F8_F9]
        SPARC_LEON3FT_B2BST_NOP
        std     %f10, [%sp + SPARC_FP_FRAME_OFFSET_F1O_F11]
        SPARC_LEON3FT_B2BST_NOP
        std     %f12, [%sp + SPARC_FP_FRAME_OFFSET_F12_F13]
        SPARC_LEON3FT_B2BST_NOP
        std     %f14, [%sp + SPARC_FP_FRAME_OFFSET_F14_F15]
        SPARC_LEON3FT_B2BST_NOP
        std     %f16, [%sp + SPARC_FP_FRAME_OFFSET_F16_F17]
        SPARC_LEON3FT_B2BST_NOP
        std     %f18, [%sp + SPARC_FP_FRAME_OFFSET_F18_F19]
        SPARC_LEON3FT_B2BST_NOP
        std     %f20, [%sp + SPARC_FP_FRAME_OFFSET_F2O_F21]
        SPARC_LEON3FT_B2BST_NOP
        std     %f22, [%sp + SPARC_FP_FRAME_OFFSET_F22_F23]
        SPARC_LEON3FT_B2BST_NOP
        std     %f24, [%sp + SPARC_FP_FRAME_OFFSET_F24_F25]
        SPARC_LEON3FT_B2BST_NOP
        std     %f26, [%sp + SPARC_FP_FRAME_OFFSET_F26_F27]
        SPARC_LEON3FT_B2BST_NOP
        std     %f28, [%sp + SPARC_FP_FRAME_OFFSET_F28_F29]
        SPARC_LEON3FT_B2BST_NOP
        std     %f30, [%sp + SPARC_FP_FRAME_OFFSET_F3O_F31]
        SPARC_LEON3FT_B2BST_NOP
        st      %fsr, [%sp + SPARC_FP_FRAME_OFFSET_FSR]
        call    SYM(_Thread_Do_dispatch)
         mov    %g6, %o0

        /*
         * Restore the floating point context from the stack frame and release
         * the stack frame.
         */
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_FO_F1], %f0
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F2_F3], %f2
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F4_F5], %f4
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F6_F7], %f6
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F8_F9], %f8
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F1O_F11], %f10
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F12_F13], %f12
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F14_F15], %f14
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F16_F17], %f16
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F18_F19], %f18
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F2O_F21], %f20
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F22_F23], %f22
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F24_F25], %f24
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F26_F27], %f26
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F28_F29], %f28
        ldd     [%sp + SPARC_FP_FRAME_OFFSET_F3O_F31], %f30
        ld      [%sp + SPARC_FP_FRAME_OFFSET_FSR], %fsr
        ba      .Lthread_dispatch_done
         add    %sp, SPARC_FP_FRAME_SIZE, %sp

.Lnon_fp_thread_dispatch:
#endif

        call    SYM(_Thread_Do_dispatch)
         mov    %g6, %o0

#if SPARC_HAS_FPU == 1
.Lthread_dispatch_done:
#endif

        ta       SPARC_SWTRAP_IRQDIS ! **** DISABLE INTERRUPTS ****

        /*
         *  While we had ISR dispatching disabled in this thread,
         *  did we miss anything?  If so, then we need to do another
         *  _Thread_Do_dispatch() before leaving this ISR dispatch context.
         */
        ldub    [%g6 + PER_CPU_DISPATCH_NEEDED], %l7

        orcc    %l7, %g0, %g0        ! Is a thread dispatch necessary?
        bne     isr_dispatch         ! Yes, then invoke the dispatcher again.
         mov    0, %o1               ! ISR level for _Thread_Do_dispatch()

        /*
         * No, then set the ISR dispatch disable flag to zero and continue with
         * the simple return.
         */
        st       %g0, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE]

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _SPARC_Interrupt_trap() and this point.  Thus the CWP and WIM
         *  should not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0

        /*
         *  Restore the task's global and output registers
         */

        mov    %fp, %g1

                                              ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
        ld      [%fp + ISF_G7_OFFSET], %g7    ! restore g7

        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and are thus off limits.
         *
         *   The following are the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  if (CWP + 1) is an invalid window then we need to reload it.
         *
         *  WARNING: Traps should now be disabled
         */

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = wim >> (cwp + 1) ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is the task's window invalid?
        bne     good_task_window

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))
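        /*
         *  For illustration, assuming 8 register windows: if the task's
         *  window is 6 (WIM = 0x40), the rotate yields a new WIM of 0x80, so
         *  window 6 becomes valid again and is reloaded from the stack below.
         */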

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the task's window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! reload of sp clobbers ISF
        save                               ! Back to ISR dispatch window

good_task_window:
        TN0018_WAIT_IFLUSH %l3,%l4         ! GRLIB-TN-0018 work around macro

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop; nop; nop
                                           !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        TN0018_FIX %l3,%l4                 ! GRLIB-TN-0018 work around macro
        jmp     %l1                        ! transfer control and
        rett    %l2                        ! go back to the task's window

/* end of file */