source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ c556d0ba

Last change on this file was c556d0ba, checked in by Joel Sherrill <joel.sherrill@…> on 05/07/01 at 13:06:56

2001-05-07 Joel Sherrill <joel@…>

  • cpu_asm.S: Merged patches from Gregory Menke <Gregory.D.Menke.1@…> that clean up stack usage and include nops in the delay slots.
/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appear in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission.  Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and adding the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *
 *  COPYRIGHT (c) 1989-2000.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 *  $Id$
 */

#include <asm.h>
#include "iregdef.h"
#include "idtcpu.h"

/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 (R4xxx)
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP
#define ADD     dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0
#define MTCO    dmtc0
#define ADDU    addu
#define ADDIU   addiu
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif __mips == 1
/* 32 bit register operations */
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif
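
/*  Example: with these width macros a single instruction sequence serves
 *  both ISA levels.  Under __mips == 1, "STREG s1,S1_OFFSET*R_SZ(a0)"
 *  (using the offsets defined below) assembles as "sw s1,4(a0)"; under
 *  __mips == 3 the same line becomes "sd s1,8(a0)", storing a full
 *  64 bit register at an 8 byte stride.
 */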


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12

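/*  A minimal sketch of the layout these offsets assume; the
 *  authoritative definition is Context_Control in cpu.h.  Each field is
 *  one machine register (R_SZ bytes) wide, reg_t standing for a 32 or
 *  64 bit integer type depending on the ISA level:
 *
 *    typedef struct {
 *      reg_t s0, s1, s2, s3, s4, s5, s6, s7;
 *      reg_t sp, fp, ra;
 *      reg_t c0_sr, c0_epc;
 *    } Context_Control;
 */
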
/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31

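/*  Correspondingly, Context_Control_fp is assumed to amount to 32
 *  consecutive floating point register slots of F_SZ bytes each, along
 *  the lines of (fp_reg_t being a hypothetical F_SZ byte wide type):
 *
 *    typedef struct {
 *      fp_reg_t fp[32];
 *    } Context_Control_fp;
 */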

/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */
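
/*  The macro alternative mentioned above would look something like the
 *  following in cpu.h (illustrative only, not the actual definition):
 *
 *    #define _CPU_Context_save_fp( _fp_context_ptr ) \
 *        _CPU_Context_save_fp_context( *(_fp_context_ptr) )
 *
 *  i.e. C performs the double dereference and the assembly routine then
 *  receives a plain (Context_Control_fp *).
 */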

#if ( CPU_HARDWARE_FP == FALSE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noat
        ld a1,(a0)
        NOP
        swc1 $f0,FP0_OFFSET*F_SZ(a1)
        swc1 $f1,FP1_OFFSET*F_SZ(a1)
        swc1 $f2,FP2_OFFSET*F_SZ(a1)
        swc1 $f3,FP3_OFFSET*F_SZ(a1)
        swc1 $f4,FP4_OFFSET*F_SZ(a1)
        swc1 $f5,FP5_OFFSET*F_SZ(a1)
        swc1 $f6,FP6_OFFSET*F_SZ(a1)
        swc1 $f7,FP7_OFFSET*F_SZ(a1)
        swc1 $f8,FP8_OFFSET*F_SZ(a1)
        swc1 $f9,FP9_OFFSET*F_SZ(a1)
        swc1 $f10,FP10_OFFSET*F_SZ(a1)
        swc1 $f11,FP11_OFFSET*F_SZ(a1)
        swc1 $f12,FP12_OFFSET*F_SZ(a1)
        swc1 $f13,FP13_OFFSET*F_SZ(a1)
        swc1 $f14,FP14_OFFSET*F_SZ(a1)
        swc1 $f15,FP15_OFFSET*F_SZ(a1)
        swc1 $f16,FP16_OFFSET*F_SZ(a1)
        swc1 $f17,FP17_OFFSET*F_SZ(a1)
        swc1 $f18,FP18_OFFSET*F_SZ(a1)
        swc1 $f19,FP19_OFFSET*F_SZ(a1)
        swc1 $f20,FP20_OFFSET*F_SZ(a1)
        swc1 $f21,FP21_OFFSET*F_SZ(a1)
        swc1 $f22,FP22_OFFSET*F_SZ(a1)
        swc1 $f23,FP23_OFFSET*F_SZ(a1)
        swc1 $f24,FP24_OFFSET*F_SZ(a1)
        swc1 $f25,FP25_OFFSET*F_SZ(a1)
        swc1 $f26,FP26_OFFSET*F_SZ(a1)
        swc1 $f27,FP27_OFFSET*F_SZ(a1)
        swc1 $f28,FP28_OFFSET*F_SZ(a1)
        swc1 $f29,FP29_OFFSET*F_SZ(a1)
        swc1 $f30,FP30_OFFSET*F_SZ(a1)
        swc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * );
 */

#if ( CPU_HARDWARE_FP == FALSE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        ld a1,(a0)
        NOP
        lwc1 $f0,FP0_OFFSET*F_SZ(a1)
        lwc1 $f1,FP1_OFFSET*F_SZ(a1)
        lwc1 $f2,FP2_OFFSET*F_SZ(a1)
        lwc1 $f3,FP3_OFFSET*F_SZ(a1)
        lwc1 $f4,FP4_OFFSET*F_SZ(a1)
        lwc1 $f5,FP5_OFFSET*F_SZ(a1)
        lwc1 $f6,FP6_OFFSET*F_SZ(a1)
        lwc1 $f7,FP7_OFFSET*F_SZ(a1)
        lwc1 $f8,FP8_OFFSET*F_SZ(a1)
        lwc1 $f9,FP9_OFFSET*F_SZ(a1)
        lwc1 $f10,FP10_OFFSET*F_SZ(a1)
        lwc1 $f11,FP11_OFFSET*F_SZ(a1)
        lwc1 $f12,FP12_OFFSET*F_SZ(a1)
        lwc1 $f13,FP13_OFFSET*F_SZ(a1)
        lwc1 $f14,FP14_OFFSET*F_SZ(a1)
        lwc1 $f15,FP15_OFFSET*F_SZ(a1)
        lwc1 $f16,FP16_OFFSET*F_SZ(a1)
        lwc1 $f17,FP17_OFFSET*F_SZ(a1)
        lwc1 $f18,FP18_OFFSET*F_SZ(a1)
        lwc1 $f19,FP19_OFFSET*F_SZ(a1)
        lwc1 $f20,FP20_OFFSET*F_SZ(a1)
        lwc1 $f21,FP21_OFFSET*F_SZ(a1)
        lwc1 $f22,FP22_OFFSET*F_SZ(a1)
        lwc1 $f23,FP23_OFFSET*F_SZ(a1)
        lwc1 $f24,FP24_OFFSET*F_SZ(a1)
        lwc1 $f25,FP25_OFFSET*F_SZ(a1)
        lwc1 $f26,FP26_OFFSET*F_SZ(a1)
        lwc1 $f27,FP27_OFFSET*F_SZ(a1)
        lwc1 $f28,FP28_OFFSET*F_SZ(a1)
        lwc1 $f29,FP29_OFFSET*F_SZ(a1)
        lwc1 $f30,FP30_OFFSET*F_SZ(a1)
        lwc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * );
 */
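
/*  A caller's-eye sketch (field names illustrative): the dispatcher
 *  saves the executing thread's registers into "run" and resumes "heir"
 *  from its saved context, e.g.
 *
 *    _CPU_Context_switch( &executing->Registers, &heir->Registers );
 *
 *  Control returns to that call site only when "run" is next dispatched.
 */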

FRAME(_CPU_Context_switch,sp,0,ra)

        MFC0  t0,C0_SR
        li    t1,~(SR_INTERRUPT_ENABLE_BITS)
        STREG t0,C0_SR_OFFSET*R_SZ(a0)  /* save status register */
        and   t0,t1
        MTC0  t0,C0_SR                /* first disable the IE bit (recommended) */
#if __mips == 3
        ori   t0,SR_EXL|SR_IE   /* set the exception level, which masks interrupts */
        MTC0  t0,C0_SR
#endif

        STREG ra,RA_OFFSET*R_SZ(a0)         /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)

        MFC0  t0,C0_EPC
        NOP
        STREG t0,C0_EPC_OFFSET*R_SZ(a0)

_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)           /* restore context */
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        LDREG t0,C0_EPC_OFFSET*R_SZ(a1)
        NOP
        MTC0  t0,C0_EPC
        LDREG t0,C0_SR_OFFSET*R_SZ(a1)
        NOP

#if __mips == 3
        andi  t0,SR_EXL
        bnez  t0,_CPU_Context_1   /* set exception level from restore context */
        li    t0,~SR_EXL
        MFC0  t1,C0_SR
        NOP
        and   t1,t0
        MTC0  t1,C0_SR

#elif __mips == 1
        andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 means disabled */
        beq   t0,$0,_CPU_Context_1          /* set level from restore context */
        MFC0  t0,C0_SR
        NOP
        or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new sr = old sr with interrupts enabled */
        MTC0  t0,C0_SR                      /* set with enabled */
#endif

_CPU_Context_1:
        j ra
        NOP
ENDFRAME(_CPU_Context_switch)

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */
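
/*  Illustrative use (names hypothetical): restart the executing thread
 *  from a freshly initialized context, never returning to the caller:
 *
 *    _CPU_Context_restore( &executing->Registers );
 */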

FRAME(_CPU_Context_restore,sp,0,ra)
        ADD a1,a0,zero
        j   _CPU_Context_switch_restore
        NOP
ENDFRAME(_CPU_Context_restore)

ASM_EXTERN(_ISR_Nest_level, SZ_INT)
ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
.extern _Thread_Dispatch
.extern _ISR_Vector_table

/*  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler while requirements for setting the RTEMS state
 *  variables dictate the rest of the order.
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branch to _ISR_Handler.
 *
 */
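
/*  In outline, the body below does roughly the following (C-like
 *  pseudocode; hedge against the actual instructions that follow):
 *
 *    save scratch registers, EPC and SR on the stack;
 *    if ( CAUSE indicates a non-interrupt exception )
 *      mips_vector_exceptions();
 *    if ( an enabled interrupt is pending ) {
 *      _ISR_Nest_level++;  _Thread_Dispatch_disable_level++;
 *      mips_vector_isr_handlers();
 *      --_ISR_Nest_level;  --_Thread_Dispatch_disable_level;
 *      if ( both are zero && a context switch or signal is pending )
 *        return through _ISR_Dispatch, which calls _Thread_Dispatch();
 *    }
 *    restore registers, then rfe back to the interrupted code;
 */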

FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU    sp,sp,-EXCP_STACK_SIZE

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
        mflo  k0
        STREG t8, R_T8*R_SZ(sp)
        STREG k0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  k0
        STREG gp, R_GP*R_SZ(sp)
        STREG k0, R_MDHI*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)
        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        MFC0     t0,C0_EPC                /* XXX */
        MFC0     t1,C0_SR
        STREG    t0,R_EPC*R_SZ(sp)        /* XXX store EPC on the stack */
        STREG    t1,R_SR*R_SZ(sp)         /* XXX store SR on the stack */

/* determine if an interrupt generated this exception */

        MFC0     k0,C0_CAUSE
        NOP
        and      k1,k0,CAUSE_EXCMASK
        beq      k1, 0, _ISR_Handler_1

_ISR_Handler_Exception:
        nop
        jal    mips_vector_exceptions
        nop

_ISR_Handler_1:

        MFC0     k1,C0_SR
        and      k0,CAUSE_IPMASK
        and      k0,k1
        beq      k0,zero,_ISR_Handler_exit
                /* external interrupt not enabled, ignore */
                /* but if it's not an exception or an interrupt, */
                /* then where did it come from??? */
        nop

  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */

  /*
   *  _ISR_Nest_level++;
   */
        LDREG  t0,_ISR_Nest_level
        NOP
        ADD    t0,t0,1
        STREG  t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,1
        STREG  t1,_Thread_Dispatch_disable_level

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

        jal    mips_vector_isr_handlers
        nop

  /*
   *  --_ISR_Nest_level;
   */
        LDREG  t2,_ISR_Nest_level
        NOP
        ADD    t2,t2,-1
        STREG  t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,-1
        STREG  t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        nop
  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        LDREG t0,_Context_Switch_necessary
        LDREG t1,_ISR_Signals_to_thread_executing
        NOP
        or    t0,t0,t1
        beq   t0,zero,_ISR_Handler_exit
        nop

  /*
   *  call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
   */
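  /*
   *  The sequence below fakes a return from exception into
   *  _ISR_Dispatch: EPC is pointed at _ISR_Dispatch and the rfe sits in
   *  the jump's delay slot, so the pre-interrupt status bits are
   *  restored and _Thread_Dispatch() runs in task mode rather than
   *  inside the exception.
   */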
        LDREG    t0,R_SR*R_SZ(sp)         /* XXX restore SR from the stack */
        NOP
        MTC0     t0,C0_SR
        la       t0,_ISR_Dispatch
        MTC0     t0, C0_EPC               /* XXX */
        NOP
        j        t0
        rfe                               /* go to _ISR_Dispatch in task mode */

_ISR_Dispatch:
        jal _Thread_Dispatch
        nop

        li      t0,0x10011001
        sw      t0,0x8001ff00
        nop
  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
        LDREG    t0, R_EPC*R_SZ(sp)       /* XXX restore EPC from the stack */
        LDREG    t1, R_SR*R_SZ(sp)        /* XXX restore SR from the stack */
        MTC0     t0, C0_EPC               /* XXX */
        MTC0     t1, C0_SR

/* restore interrupt context from stack */

        LDREG k0, R_MDLO*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        mtlo  k0
        LDREG k0, R_MDHI*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        mthi  k0
        LDREG t0, R_T0*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)
        .set noat
        LDREG AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE

        MFC0      k0, C0_EPC
        NOP
        j         k0
        rfe                     /* Might not need to do RFE here... */
        nop

        .set    reorder
ENDFRAME(_ISR_Handler)

FRAME(mips_break,sp,0,ra)
#if 1
        break 0x0
        j mips_break
#else
        j ra
#endif
        nop
ENDFRAME(mips_break)
