source: rtems/c/src/lib/libcpu/sparc64/shared/score/interrupt.S @ ce3bfb7

4.115
Last change on this file since ce3bfb7 was ce3bfb7, checked in by Joel Sherrill <joel.sherrill@…>, on 08/25/10 at 20:33:25

2010-08-25 Gedare Bloom <giddyup44@…>

PR 1688/libcpu

  • shared/score/interrupt.S: Fix bug in the sun4u _ISR_Dispatch code that ends up clobbering the global registers. It manifests primarily as a memory alignment error when the globals are used to read from or write to memory.
  • Property mode set to 100644
File size: 18.1 KB
Line 
/*  cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-2007.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 *
 *  Ported to Niagara and UltraSPARC III (US3) implementations of the SPARC-v9.
 *  Niagara and US3 modifications of respective RTEMS file:
 *    COPYRIGHT (c) 2010. Gedare Bloom.
 *
 *  $Id$
 */
27
28#include <rtems/asm.h>
29#include <rtems/score/percpu.h>
30
31
32/*
33 *  The assembler needs to be told that we know what to do with
34 *  the global registers.
35 */
36.register %g2, #scratch
37.register %g3, #scratch
38.register %g6, #scratch
39.register %g7, #scratch
40
41
42    /*
43     *  void _ISR_Handler()
44     *
45     *  This routine provides the RTEMS interrupt management.
46     *
47     *  We enter this handler from the 8 instructions in the trap table with
48     *  the following registers assumed to be set as shown:
49     *
50     *    g4 = tstate (old l0)
51     *    g2 = trap type (vector) (old l3)
52     *
53     *  NOTE: By an executive defined convention:
54     *    if trap type is between 0 and 511 it is an asynchronous trap
55     *    if trap type is between 512 and 1023 it is an asynchonous trap
56     */
57
58  .align 4
59PUBLIC(_ISR_Handler)
60  SYM(_ISR_Handler):
61
62    /*
63     * The ISR is called at TL = 1.
64     * On sun4u we use the alternate globals set.     
65     *
66     * On entry:
67     *   g4 = tstate (from trap table)
68     *   g2 = trap vector #
69     *
70     * In either case, note that trap handlers share a register window with
71     * the interrupted context, unless we explicitly enter a new window. This
72     * differs from Sparc v8, in which a dedicated register window is saved
73     * for trap handling.  This means we have to avoid overwriting any registers
74     * that we don't save.
75     *
76     */
77
78
79    /*
80     *  save some or all context on stack
81     */
82
83    /*
84     *  Save the state of the interrupted task -- especially the global
85     *  registers -- in the Interrupt Stack Frame.  Note that the ISF
86     *  includes a regular minimum stack frame which will be used if
87     *  needed by register window overflow and underflow handlers.
88     *
89     *  This is slightly wasteful, since the stack already has the window
90     *  overflow space reserved, but there is no obvious way to ensure
91     *  we can store the interrupted state and still handle window
92     *  spill/fill correctly, since there is no room for the ISF.
93     *
94     */
95
96    /* this is for debugging purposes, make sure that TL = 1, otherwise
97     * things might get dicey */
98    rdpr %tl, %g1
99    cmp %g1, 1
100    be 1f
101    nop
102
103    0: ba 0b
104    nop
105
106    1:
107    /* first store the sp of the interrupted task temporarily in g1 */
108    mov   %sp, %g1
109
110    sub     %sp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
111    ! make space for Stack_Frame||ISF
112
113    /* save tstate, tpc, tnpc, pil */
114    stx   %g4, [%sp + STACK_BIAS + ISF_TSTATE_OFFSET] 
115    rdpr  %pil, %g3
116    rdpr  %tpc, %g4
117    rdpr  %tnpc, %g5
118    stx   %g3, [%sp + STACK_BIAS + ISF_PIL_OFFSET]
119    stx   %g4, [%sp + STACK_BIAS + ISF_TPC_OFFSET]
120    stx   %g5, [%sp + STACK_BIAS + ISF_TNPC_OFFSET]
121    stx   %g2, [%sp + STACK_BIAS + ISF_TVEC_NUM]
122
123    rd  %y, %g4        ! save y
124    stx   %g4, [%sp + STACK_BIAS + ISF_Y_OFFSET]
125
126    ! save interrupted frame's output regs
127    stx     %o0, [%sp + STACK_BIAS + ISF_O0_OFFSET]     ! save o0
128    stx     %o1, [%sp + STACK_BIAS + ISF_O1_OFFSET]     ! save o1
129    stx     %o2, [%sp + STACK_BIAS + ISF_O2_OFFSET]     ! save o2
130    stx     %o3, [%sp + STACK_BIAS + ISF_O3_OFFSET]     ! save o3
131    stx     %o4, [%sp + STACK_BIAS + ISF_O4_OFFSET]     ! save o4
132    stx     %o5, [%sp + STACK_BIAS + ISF_O5_OFFSET]     ! save o5
133    stx     %g1, [%sp + STACK_BIAS + ISF_O6_SP_OFFSET]  ! save o6/sp
134    stx     %o7, [%sp + STACK_BIAS + ISF_O7_OFFSET]     ! save o7
135
136    mov  %g1, %o5    ! hold the old sp here for now
137    mov  %g2, %o1    ! we'll need trap # later
138
139    /* switch to TL[0] */
140    wrpr  %g0, 0, %tl
141
142    /* switch to normal globals */
143#if defined (SUN4U)
144    /* the assignment to pstate below will mask out the AG bit */
145#elif defined (SUN4V)
146    wrpr  %g0, 0, %gl
147#endif
148    /* get pstate to known state */
149    wrpr  %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK, %pstate
150
151    ! save globals
152    stx     %g1, [%sp + STACK_BIAS + ISF_G1_OFFSET]     ! save g1
153    stx     %g2, [%sp + STACK_BIAS + ISF_G2_OFFSET]     ! save g2
154    stx     %g3, [%sp + STACK_BIAS + ISF_G3_OFFSET]     ! save g3
155    stx     %g4, [%sp + STACK_BIAS + ISF_G4_OFFSET]     ! save g4
156    stx     %g5, [%sp + STACK_BIAS + ISF_G5_OFFSET]     ! save g5
157    stx     %g6, [%sp + STACK_BIAS + ISF_G6_OFFSET]     ! save g6
158    stx     %g7, [%sp + STACK_BIAS + ISF_G7_OFFSET]     ! save g7
159
160
161  mov  %o1, %g2  ! get the trap #
162  mov  %o5, %g7  ! store the interrupted %sp (preserve)
163  mov  %sp, %o1  ! 2nd arg to ISR Handler = address of ISF
164
165  /*
166   *  Increment ISR nest level and Thread dispatch disable level.
167   *
168   *  Register usage for this section: (note, these are used later)
169   *
170   *    g3 = _Thread_Dispatch_disable_level pointer
171   *    g5 = _Thread_Dispatch_disable_level value (uint32_t)
172   *    g6 = _ISR_Nest_level pointer
173   *    g4 = _ISR_Nest_level value (uint32_t)
174   *    o5 = temp
175   *
176   *  NOTE: It is assumed that g6 - g7 will be preserved until the ISR
177   *        nest and thread dispatch disable levels are unnested.
178   */
179
180  setx  SYM(_Thread_Dispatch_disable_level), %o5, %g3
181  lduw  [%g3], %g5
182  setx  ISR_NEST_LEVEL, %o5, %g6
183  lduw  [%g6], %g4
184
185  add      %g5, 1, %g5
186  stuw     %g5, [%g3]
187
188  add      %g4, 1, %g4
189  stuw     %g4, [%g6]
190
191  /*
192   *  If ISR nest level was zero (now 1), then switch stack.
193   */
194
195  subcc    %g4, 1, %g4             ! outermost interrupt handler?
196  bnz      dont_switch_stacks      ! No, then do not switch stacks
197
198  setx  SYM(INTERRUPT_STACK_HIGH), %o5, %g1
199  ldx  [%g1], %sp
200
201  /*
202   * Adjust the stack for the stack bias
203   */
204  sub     %sp, STACK_BIAS, %sp
205
206  /*
207   *  Make sure we have a place on the stack for the window overflow
208   *  trap handler to write into.  At this point it is safe to
209   *  enable traps again.
210   */
211
212  sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
213
214  dont_switch_stacks:
215  /*
216   *  Check if we have an external interrupt (trap 0x41 - 0x4f). If so,
217   *  set the PIL to mask off interrupts with lower priority.
218   *
219   *  The original PIL is not modified since it will be restored
220   *  when the interrupt handler returns.
221   */
222
223  and      %g2, 0x0ff, %g1 ! is bottom byte of vector number [0x41,0x4f]?
224
225  subcc    %g1, 0x41, %g0
226  bl       dont_fix_pil
227  subcc    %g1, 0x4f, %g0
228  bg       dont_fix_pil
229  nop
230  wrpr     %g0, %g1, %pil
231
232  dont_fix_pil:
233  /* We need to be careful about enabling traps here.
234   *
235   * We already stored off the tstate, tpc, and tnpc, and switched to
236   * TL = 0, so it should be safe.
237   */
238
239  /* zero out g4 so that ofw calls work */
240  mov  %g0, %g4
241
242  ! **** ENABLE TRAPS ****
243  wrpr  %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
244    SPARC_PSTATE_IE_MASK, %pstate
245
246    /*
247     *  Vector to user's handler.
248     *
249     *  NOTE: TBR may no longer have vector number in it since
250     *        we just enabled traps.  It is definitely in g2.
251     */
252    setx  SYM(_ISR_Vector_table), %o5, %g1
253    ldx      [%g1], %g1
254    and      %g2, 0x1FF, %o5        ! remove synchronous trap indicator
255    sll      %o5, 3, %o5            ! o5 = offset into table
256    ldx      [%g1 + %o5], %g1       ! g1 = _ISR_Vector_table[ vector ]
257
258
259    ! o1 = 2nd arg = address of the ISF
260    !   WAS LOADED WHEN ISF WAS SAVED!!!
261    mov      %g2, %o0               ! o0 = 1st arg = vector number
262    call     %g1, 0
263    nop                             ! delay slot
264
265    /*
266     *  Redisable traps so we can finish up the interrupt processing.
267     *  This is a conservative place to do this.
268     */
269    ! **** DISABLE TRAPS ****
270    wrpr  %g0, SPARC_PSTATE_PRIV_MASK, %pstate
271
272    /*
273     * We may safely use any of the %o and %g registers, because
274     * we saved them earlier (and any other interrupt that uses
275     * them will also save them).  Right now, the state of those
276     * registers are as follows:
277     *  %o registers: unknown (user's handler may have destroyed)
278     *  %g1,g4,g5: scratch
279     *  %g2: unknown: was trap vector
280     *  %g3: uknown: was _Thread_Dispatch_Disable_level pointer
281     *  %g6: _ISR_Nest_level
282     *  %g7: interrupted task's sp
283     */
284
285    /*
286     *  Increment ISR nest level and Thread dispatch disable level.
287     *
288     *  Register usage for this section: (note: as used above)
289     *
290     *    g3 = _Thread_Dispatch_disable_level pointer
291     *    g5 = _Thread_Dispatch_disable_level value
292     *    g6 = _ISR_Nest_level pointer
293     *    g4 = _ISR_Nest_level value
294     *    o5 = temp
295     */
296
297    /* We have to re-load the values from memory, because there are
298     * not enough registers that we know will be preserved across the
299     * user's handler. If this is a problem, we can create a register
300     * window for _ISR_Handler.
301     */
302
303    setx  SYM(_Thread_Dispatch_disable_level), %o5, %g3
304    lduw  [%g3],%g5
305    lduw  [%g6],%g4
306    sub   %g5, 1, %g5
307    stuw  %g5, [%g3]
308    sub   %g4, 1, %g4
309    stuw  %g4, [%g6]
310
311    orcc  %g4, %g0, %g0           ! ISRs still nested?
312    bnz   dont_restore_stack      ! Yes then don't restore stack yet
313    nop
314
315    /*
316     *  This is the outermost interrupt handler. Need to get off the
317     *  CPU Interrupt Stack and back to the tasks stack.
318     *
319     *  The following subtract should get us back on the interrupted
320     *  tasks stack and add enough room to invoke the dispatcher.
321     *  When we enable traps, we are mostly back in the context
322     *  of the task and subsequent interrupts can operate normally.
323     *
324     *  Now %sp points to the bottom of the ISF.
325     * 
326     */
327
328    sub      %g7,   CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
329
330    dont_restore_stack:
331
332    /*
333     *  If dispatching is disabled (includes nested interrupt case),
334     *  then do a "simple" exit.
335     */
336
337    orcc     %g5, %g0, %g0   ! Is dispatching disabled?
338    bnz      simple_return   ! Yes, then do a "simple" exit
339    ! NOTE: Use the delay slot
340    mov      %g0, %g4  ! clear g4 for ofw
341
342    ! Are we dispatching from a previous ISR in the interrupted thread?
343    setx  SYM(_CPU_ISR_Dispatch_disable), %o5, %g5
344    lduw     [%g5], %o5
345    orcc     %o5, %g0, %g0   ! Is this thread already doing an ISR?
346    bnz      simple_return   ! Yes, then do a "simple" exit
347    nop
348
349    setx    DISPATCH_NEEDED, %o5, %g7
350
351
352    /*
353     *  If a context switch is necessary, then do fudge stack to
354     *  return to the interrupt dispatcher.
355     */
356
357    ldub     [%g7], %o5
358
359    orcc     %o5, %g0, %g0   ! Is thread switch necessary?
360    bz       simple_return   ! no, then do a simple return. otherwise fallthru
361    nop
362
363    /*
364     *  Invoke interrupt dispatcher.
365     */
366PUBLIC(_ISR_Dispatch)
367  SYM(_ISR_Dispatch):
368    ! Set ISR dispatch nesting prevention flag
369      mov      1, %o1
370      setx     SYM(_CPU_ISR_Dispatch_disable), %o5, %o2
371      stuw     %o1, [%o2]
372
373
374      !  **** ENABLE TRAPS ****
375      wrpr  %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
376        SPARC_PSTATE_IE_MASK, %pstate
377        isr_dispatch:
378        call    SYM(_Thread_Dispatch), 0
379        nop
380
381        /*
382         *  We invoked _Thread_Dispatch in a state similar to the interrupted
383         *  task.  In order to safely be able to tinker with the register
384         *  windows and get the task back to its pre-interrupt state,
385         *  we need to disable interrupts.
386         */
387      mov   2, %g4        ! syscall (disable interrupts)
388      ta    0             ! syscall (disable interrupts)
389      mov   0, %g4
390
391  /*
392   *  While we had ISR dispatching disabled in this thread,
393   *  did we miss anything.  If so, then we need to do another
394   *  _Thread_Dispatch before leaving this ISR Dispatch context.
395   */
396
397  setx     DISPATCH_NEEDED, %o5, %o1
398  ldub     [%o1], %o2
399
400  orcc     %o2, %g0, %g0   ! Is thread switch necessary?
401  bz       allow_nest_again ! No, then clear out and return
402  nop
403
404  ! Yes, then invoke the dispatcher
405dispatchAgain:
406  mov      3, %g4        ! syscall (enable interrupts)
407  ta       0             ! syscall (enable interrupts)
408  ba       isr_dispatch
409  mov      0, %g4
410
411  allow_nest_again:
412
413  ! Zero out ISR stack nesting prevention flag
414  setx    SYM(_CPU_ISR_Dispatch_disable), %o5, %o1
415  stuw    %g0,[%o1]
416
417  /*
418   *  The CWP in place at this point may be different from
419   *  that which was in effect at the beginning of the ISR if we
420   *  have been context switched between the beginning of this invocation
421   *  of _ISR_Handler and this point.  Thus the CWP and WIM should
422   *  not be changed back to their values at ISR entry time.  Any
423   *  changes to the PSR must preserve the CWP.
424   */
425
426  simple_return:
427  flushw          ! get register windows to a 'clean' state
428
429  ! **** DISABLE TRAPS ****
430  wrpr    %g0, SPARC_PSTATE_PRIV_MASK, %pstate
431
432  ldx     [%sp + STACK_BIAS + ISF_Y_OFFSET], %o1      ! restore y
433  wr      %o1, 0, %y
434
435  ldx  [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1
436
437! see if cwp is proper (tstate.cwp == cwp)
438  and  %g1, 0x1F, %g6
439  rdpr  %cwp, %g7
440  cmp  %g6, %g7
441  bz  good_window
442  nop
443
444  /*
445   * Fix the CWP. Need the cwp to be the proper cwp that
446   * gets restored when returning from the trap via retry/done. Do
447   * this before reloading the task's output regs. Basically fake a
448   * window spill/fill.
449   *
450   * Is this necessary on sun4v? Why not just re-write
451   * tstate.cwp to be equal to the current cwp?
452   */
453  mov  %sp, %g1
454  stx  %l0, [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET]
455  stx  %l1, [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET]
456  stx  %l2, [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET]
457  stx  %l3, [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET]
458  stx  %l4, [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET]
459  stx  %l5, [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET]
460  stx  %l6, [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET]
461  stx  %l7, [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET]
462  stx  %i0, [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET]
463  stx  %i1, [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET]
464  stx  %i2, [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET]
465  stx  %i3, [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET]
466  stx  %i4, [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET]
467  stx  %i5, [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET]
468  stx  %i6, [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET]
469  stx  %i7, [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET]
470  wrpr  %g0, %g6, %cwp
471  mov  %g1, %sp
472  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET], %l0
473  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET], %l1
474  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET], %l2
475  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET], %l3
476  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET], %l4
477  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET], %l5
478  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET], %l6
479  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET], %l7
480  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET], %i0
481  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET], %i1
482  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET], %i2
483  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET], %i3
484  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET], %i4
485  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET], %i5
486  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
487  ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET], %i7
488
489
490  good_window:
491
492
493  /*
494   *  Restore tasks global and out registers
495   */
496
497  ldx     [%sp + STACK_BIAS + ISF_G1_OFFSET], %g1    ! restore g1
498  ldx     [%sp + STACK_BIAS + ISF_G2_OFFSET], %g2    ! restore g2
499  ldx     [%sp + STACK_BIAS + ISF_G3_OFFSET], %g3    ! restore g3
500  ldx     [%sp + STACK_BIAS + ISF_G4_OFFSET], %g4    ! restore g4
501  ldx     [%sp + STACK_BIAS + ISF_G5_OFFSET], %g5    ! restore g5
502  ldx     [%sp + STACK_BIAS + ISF_G6_OFFSET], %g6    ! restore g6
503  ldx     [%sp + STACK_BIAS + ISF_G7_OFFSET], %g7    ! restore g7
504
505  ! Assume the interrupted context is in TL 0 with GL 0 / normal globals.
506  ! When tstate is restored at done/retry, the interrupted context is restored.
507  ! return to TL[1], GL[1], and restore TSTATE, TPC, and TNPC
508  wrpr  %g0, 1, %tl
509
510  ! return to GL=1 or AG
511#if defined(SUN4U)
512    rdpr  %pstate, %o1
513    or  %o1, SPARC_PSTATE_AG_MASK, %o1
514    wrpr  %o1, %g0, %pstate                 ! go to AG.
515#elif defined(SUN4V)
516  wrpr  %g0, 1, %gl
517#endif
518
519! now we can use global registers (at gl=1 or AG)
520  ldx   [%sp + STACK_BIAS + ISF_PIL_OFFSET], %g3
521  ldx   [%sp + STACK_BIAS + ISF_TPC_OFFSET], %g4
522  ldx   [%sp + STACK_BIAS + ISF_TNPC_OFFSET], %g5
523  ldx   [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1
524  ldx   [%sp + STACK_BIAS + ISF_TVEC_NUM], %g2
525  wrpr  %g0, %g3, %pil
526  wrpr  %g0, %g4, %tpc
527  wrpr  %g0, %g5, %tnpc
528
529  wrpr    %g0, %g1, %tstate
530
531  ldx     [%sp + STACK_BIAS + ISF_O0_OFFSET], %o0    ! restore o0
532  ldx     [%sp + STACK_BIAS + ISF_O1_OFFSET], %o1    ! restore o1
533  ldx     [%sp + STACK_BIAS + ISF_O2_OFFSET], %o2    ! restore o2
534  ldx     [%sp + STACK_BIAS + ISF_O3_OFFSET], %o3    ! restore o3
535  ldx     [%sp + STACK_BIAS + ISF_O4_OFFSET], %o4    ! restore o4
536  ldx     [%sp + STACK_BIAS + ISF_O5_OFFSET], %o5    ! restore o5
537  ! sp is restored later
538  ldx     [%sp + STACK_BIAS + ISF_O7_OFFSET], %o7    ! restore o7
539
540  ldx     [%sp + STACK_BIAS + ISF_O6_SP_OFFSET], %o6 ! restore o6/sp
541
542  /*
543   *  Determine whether to re-execute the trapping instruction
544   *  (asynchronous trap) or to skip the trapping instruction
545   *  (synchronous trap).
546   */
547
548  andcc   %g2, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
549  ! Is this a synchronous trap?
550  be  not_synch             ! No, then skip trapping instruction
551  mov  0, %g4
552  retry        ! re-execute trapping instruction
553  not_synch:
554  done        ! skip trapping instruction
555
/* end of file */
Note: See TracBrowser for help on using the repository browser.