source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ 0ef748fb

4.104.114.84.95
Last change on this file since 0ef748fb was 7f8c11c, checked in by Joel Sherrill <joel.sherrill@…>, on 11/30/00 at 14:02:33

2000-11-30 Joel Sherrill <joel@…>

  • cpu_asm.S: Fixed the "_CPU_Ccontext_switch_restore" typo to the correct name "_CPU_Context_switch_restore". Added a dummy version of exc_utlb_code() so applications would link.
  • Property mode set to 100644
File size: 30.8 KB
Line 
1/*  cpu_asm.S
2 *
3 *  This file contains the basic algorithms for all assembly code used
4 *  in an specific CPU port of RTEMS.  These algorithms must be implemented
5 *  in assembly language
6 *
7 *  Author:     Craig Lebakken <craigl@transition.com>
8 *
9 *  COPYRIGHT (c) 1996 by Transition Networks Inc.
10 *
11 *  To anyone who acknowledges that this file is provided "AS IS"
12 *  without any express or implied warranty:
13 *      permission to use, copy, modify, and distribute this file
14 *      for any purpose is hereby granted without fee, provided that
15 *      the above copyright notice and this notice appears in all
16 *      copies, and that the name of Transition Networks not be used in
17 *      advertising or publicity pertaining to distribution of the
18 *      software without specific, written prior permission.
19 *      Transition Networks makes no representations about the suitability
20 *      of this software for any purpose.
21 *
22 *  Derived from c/src/exec/score/cpu/no_cpu/cpu_asm.s:
23 *
24 *  COPYRIGHT (c) 1989-1999.
25 *  On-Line Applications Research Corporation (OAR).
26 *
27 *  The license and distribution terms for this file may be
28 *  found in the file LICENSE in this distribution or at
29 *  http://www.OARcorp.com/rtems/license.html.
30 *
31 *  $Id$
32 */
33/* @(#)cpu_asm.S       08/20/96     1.15 */
34
35#include "cpu_asm.h"
36
37#include "iregdef.h"
38#include "idtcpu.h"
39
40#define FRAME(name,frm_reg,offset,ret_reg)      \
41        .globl  name;                           \
42        .ent    name;                           \
43name:;                                          \
44        .frame  frm_reg,offset,ret_reg
45#define ENDFRAME(name)                          \
46        .end name
47
48#define EXCP_STACK_SIZE (NREGS*R_SZ)
49#define ISR_VEC_SIZE 4
50
51#if 1  /* 32 bit unsigned32 types */
52#define sint sw
53#define lint lw
54#define stackadd addiu
55#define intadd addu
56#define SZ_INT 4
57#define SZ_INT_POW2 2
58#else /* 64 bit unsigned32 types */
59#define sint dw
60#define lint dw
61#define stackadd daddiu
62#define intadd daddu
63#define SZ_INT 8
64#define SZ_INT_POW2 3
65#endif
66
67#ifdef __GNUC__
68#define EXTERN(x,size) .extern x,size
69#else
70#define EXTERN(x,size)
71#endif
72
73/* NOTE: these constants must match the Context_Control structure in cpu.h */
74#define S0_OFFSET 0
75#define S1_OFFSET 1
76#define S2_OFFSET 2
77#define S3_OFFSET 3
78#define S4_OFFSET 4
79#define S5_OFFSET 5
80#define S6_OFFSET 6
81#define S7_OFFSET 7
82#define SP_OFFSET 8
83#define FP_OFFSET 9
84#define RA_OFFSET 10
85#define C0_SR_OFFSET 11
86#define C0_EPC_OFFSET 12
87
88/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
89#define FP0_OFFSET  0
90#define FP1_OFFSET  1
91#define FP2_OFFSET  2
92#define FP3_OFFSET  3
93#define FP4_OFFSET  4
94#define FP5_OFFSET  5
95#define FP6_OFFSET  6
96#define FP7_OFFSET  7
97#define FP8_OFFSET  8
98#define FP9_OFFSET  9
99#define FP10_OFFSET 10
100#define FP11_OFFSET 11
101#define FP12_OFFSET 12
102#define FP13_OFFSET 13
103#define FP14_OFFSET 14
104#define FP15_OFFSET 15
105#define FP16_OFFSET 16
106#define FP17_OFFSET 17
107#define FP18_OFFSET 18
108#define FP19_OFFSET 19
109#define FP20_OFFSET 20
110#define FP21_OFFSET 21
111#define FP22_OFFSET 22
112#define FP23_OFFSET 23
113#define FP24_OFFSET 24
114#define FP25_OFFSET 25
115#define FP26_OFFSET 26
116#define FP27_OFFSET 27
117#define FP28_OFFSET 28
118#define FP29_OFFSET 29
119#define FP30_OFFSET 30
120#define FP31_OFFSET 31
121
122
123/*PAGE
124 *
125 *  _CPU_ISR_Get_level
126 */
127
128#if 0
129unsigned32 _CPU_ISR_Get_level( void )
130{
131  /*
132   *  This routine returns the current interrupt level.
133   */
134}
135#endif
136
137#if __mips == 3
138/* return the current exception level for the 4650 */
/* unsigned32 _CPU_ISR_Get_level( void )  -- MIPS ISA 3 (R4xxx)
 *
 * Returns the current interrupt disable level in v0: the SR_EXL
 * (exception level) bit of the CP0 status register, shifted down so
 * the result is 0 (enabled) or 1 (disabled).
 */
FRAME(_CPU_ISR_Get_level,sp,0,ra)
        mfc0 v0,C0_SR                   /* v0 = CP0 status register */
        nop                             /* mfc0 read hazard */
        andi v0,SR_EXL                  /* isolate the EXL bit */
        srl v0,1                        /* normalize EXL down to bit 0 -- assumes
                                           SR_EXL is bit 1 (TODO confirm in idtcpu.h) */
        j ra
ENDFRAME(_CPU_ISR_Get_level)
146
/* void _CPU_ISR_Set_level( unsigned32 new_level )  -- MIPS ISA 3 (R4xxx)
 *
 * In:  a0 = new level; 0 => enable interrupts (clear SR_EXL),
 *      non-zero => disable (set SR_EXL).
 * Does nothing when the normalized current level already equals a0.
 * Clobbers: v0, t0, t1.
 */
FRAME(_CPU_ISR_Set_level,sp,0,ra)
        nop
        mfc0 v0,C0_SR
        nop
        andi v0,SR_EXL
        beqz v0,_CPU_ISR_Set_1          /* normalize v0 to 0 or 1 */
        nop
        li v0,1
_CPU_ISR_Set_1:
        beq v0,a0,_CPU_ISR_Set_exit     /* if (current_level == new_level) return */
        nop
        bnez a0,_CPU_ISR_Set_2          /* non-zero request => go set EXL */
        nop
        nop
        /* request is 0: clear the exception level to re-enable interrupts */
        mfc0 t0, C0_SR
        nop
        li t1,~SR_EXL
        and t0,t1
        nop
        mtc0 t0,C0_SR                   /* disable exception level */
        nop
        j ra
        nop
_CPU_ISR_Set_2:
        nop
        mfc0 t0,C0_SR
        nop
        li t1,~SR_IE
        and t0,t1
        nop
        mtc0 t0,C0_SR                   /* first disable ie bit (recommended) */
        nop
        ori  t0, SR_EXL|SR_IE           /* enable exception level (masks interrupts) */
        nop
        mtc0 t0,C0_SR
        nop
_CPU_ISR_Set_exit:
        j ra
        nop
ENDFRAME(_CPU_ISR_Set_level)
187
188#elif __mips == 1
189
190/* MIPS ISA 1 ( R3000 ) */
191/* These routines might not be needed for the R3000 */
192/* Q:Who calls _CPU_ISR_Get/Set_level? */
/* unsigned32 _CPU_ISR_Get_level( void )  -- MIPS ISA 1 (R3000)
 *
 * Returns the current-interrupt-enable bits (SR_IEC mask) of the CP0
 * status register in v0.  Non-zero => interrupts currently enabled.
 */
FRAME(_CPU_ISR_Get_level,sp,0,ra)
        mfc0 v0,C0_SR                   /* v0 = CP0 status register */
        nop                             /* mfc0 read hazard */
        andi v0, SR_IEC                 /* keep only the IEc enable bit(s) */
        j ra
ENDFRAME(_CPU_ISR_Get_level)
199
/* void _CPU_ISR_Set_level( unsigned32 new_level )  -- MIPS ISA 1 (R3000)
 *
 * Or's the SR_IEC bits of a0 into the CP0 status register.
 *
 * NOTE(review): because the bits are only or'ed in, this routine can
 * enable interrupts but can never *clear* IEc to disable them -- confirm
 * whether callers ever pass a "disable" level here.
 */
FRAME(_CPU_ISR_Set_level,sp,0,ra)
        nop
        mfc0 t0,C0_SR
        andi a0, SR_IEC                 /* keep only the enable bit(s) of the request */
        or   t0, a0                     /* set (never clear) IEc in the status */
        mtc0 t0,C0_SR
        nop
        j ra
ENDFRAME(_CPU_ISR_Set_level)
209
210#else
211   #error "__mips is set to 1 or 3"
212#endif
213
214/*
215 *  _CPU_Context_save_fp_context
216 *
217 *  This routine is responsible for saving the FP context
218 *  at *fp_context_ptr.  If the point to load the FP context
219 *  from is changed then the pointer is modified by this routine.
220 *
221 *  Sometimes a macro implementation of this is in cpu.h which dereferences
222 *  the ** and a similarly named routine in this file is passed something
223 *  like a (Context_Control_fp *).  The general rule on making this decision
224 *  is to avoid writing assembly language.
225 */
226
227/* void _CPU_Context_save_fp(
228 * void **fp_context_ptr
229 * )
230 * {
231 * }
232 */
233
/* void _CPU_Context_save_fp( void **fp_context_ptr )
 *
 * In:  a0 = pointer to a (Context_Control_fp *).
 * Stores all 32 single-precision FP registers into *fp_context_ptr;
 * 4-byte slots must match Context_Control_fp in cpu.h.
 *
 * NOTE(review): `ld a1,(a0)` is a doubleword load of the context
 * pointer; on a 32-bit configuration this looks suspect -- confirm
 * against the pointer size used by cpu.h.
 */
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noat
        ld a1,(a0)                      /* a1 = *fp_context_ptr */
        swc1 $f0,FP0_OFFSET*4(a1)
        swc1 $f1,FP1_OFFSET*4(a1)
        swc1 $f2,FP2_OFFSET*4(a1)
        swc1 $f3,FP3_OFFSET*4(a1)
        swc1 $f4,FP4_OFFSET*4(a1)
        swc1 $f5,FP5_OFFSET*4(a1)
        swc1 $f6,FP6_OFFSET*4(a1)
        swc1 $f7,FP7_OFFSET*4(a1)
        swc1 $f8,FP8_OFFSET*4(a1)
        swc1 $f9,FP9_OFFSET*4(a1)
        swc1 $f10,FP10_OFFSET*4(a1)
        swc1 $f11,FP11_OFFSET*4(a1)
        swc1 $f12,FP12_OFFSET*4(a1)
        swc1 $f13,FP13_OFFSET*4(a1)
        swc1 $f14,FP14_OFFSET*4(a1)
        swc1 $f15,FP15_OFFSET*4(a1)
        swc1 $f16,FP16_OFFSET*4(a1)
        swc1 $f17,FP17_OFFSET*4(a1)
        swc1 $f18,FP18_OFFSET*4(a1)
        swc1 $f19,FP19_OFFSET*4(a1)
        swc1 $f20,FP20_OFFSET*4(a1)
        swc1 $f21,FP21_OFFSET*4(a1)
        swc1 $f22,FP22_OFFSET*4(a1)
        swc1 $f23,FP23_OFFSET*4(a1)
        swc1 $f24,FP24_OFFSET*4(a1)
        swc1 $f25,FP25_OFFSET*4(a1)
        swc1 $f26,FP26_OFFSET*4(a1)
        swc1 $f27,FP27_OFFSET*4(a1)
        swc1 $f28,FP28_OFFSET*4(a1)
        swc1 $f29,FP29_OFFSET*4(a1)
        swc1 $f30,FP30_OFFSET*4(a1)
        swc1 $f31,FP31_OFFSET*4(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_save_fp)
273
274/*
275 *  _CPU_Context_restore_fp_context
276 *
277 *  This routine is responsible for restoring the FP context
278 *  at *fp_context_ptr.  If the point to load the FP context
279 *  from is changed then the pointer is modified by this routine.
280 *
281 *  Sometimes a macro implementation of this is in cpu.h which dereferences
282 *  the ** and a similarly named routine in this file is passed something
283 *  like a (Context_Control_fp *).  The general rule on making this decision
284 *  is to avoid writing assembly language.
285 */
286
287/* void _CPU_Context_restore_fp(
288 * void **fp_context_ptr
289 * )
290 * {
291 * }
292 */
293
/* void _CPU_Context_restore_fp( void **fp_context_ptr )
 *
 * In:  a0 = pointer to a (Context_Control_fp *).
 * Reloads all 32 single-precision FP registers from *fp_context_ptr;
 * mirror image of _CPU_Context_save_fp above.
 */
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        ld a1,(a0)                      /* a1 = *fp_context_ptr */
        lwc1 $f0,FP0_OFFSET*4(a1)
        lwc1 $f1,FP1_OFFSET*4(a1)
        lwc1 $f2,FP2_OFFSET*4(a1)
        lwc1 $f3,FP3_OFFSET*4(a1)
        lwc1 $f4,FP4_OFFSET*4(a1)
        lwc1 $f5,FP5_OFFSET*4(a1)
        lwc1 $f6,FP6_OFFSET*4(a1)
        lwc1 $f7,FP7_OFFSET*4(a1)
        lwc1 $f8,FP8_OFFSET*4(a1)
        lwc1 $f9,FP9_OFFSET*4(a1)
        lwc1 $f10,FP10_OFFSET*4(a1)
        lwc1 $f11,FP11_OFFSET*4(a1)
        lwc1 $f12,FP12_OFFSET*4(a1)
        lwc1 $f13,FP13_OFFSET*4(a1)
        lwc1 $f14,FP14_OFFSET*4(a1)
        lwc1 $f15,FP15_OFFSET*4(a1)
        lwc1 $f16,FP16_OFFSET*4(a1)
        lwc1 $f17,FP17_OFFSET*4(a1)
        lwc1 $f18,FP18_OFFSET*4(a1)
        lwc1 $f19,FP19_OFFSET*4(a1)
        lwc1 $f20,FP20_OFFSET*4(a1)
        lwc1 $f21,FP21_OFFSET*4(a1)
        lwc1 $f22,FP22_OFFSET*4(a1)
        lwc1 $f23,FP23_OFFSET*4(a1)
        lwc1 $f24,FP24_OFFSET*4(a1)
        lwc1 $f25,FP25_OFFSET*4(a1)
        lwc1 $f26,FP26_OFFSET*4(a1)
        lwc1 $f27,FP27_OFFSET*4(a1)
        lwc1 $f28,FP28_OFFSET*4(a1)
        lwc1 $f29,FP29_OFFSET*4(a1)
        lwc1 $f30,FP30_OFFSET*4(a1)
        lwc1 $f31,FP31_OFFSET*4(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_restore_fp)
333
334/*  _CPU_Context_switch
335 *
336 *  This routine performs a normal non-FP context switch.
337 */
338
339/* void _CPU_Context_switch(
340 * Context_Control  *run,
341 * Context_Control  *heir
342 * )
343 * {
344 * }
345 */
346#if __mips == 3
347/* MIPS ISA Level 3 ( R4xxx ) */
348
/* void _CPU_Context_switch( Context_Control *run, Context_Control *heir )
 *   -- MIPS ISA 3 (R4xxx)
 *
 * In:  a0 = running thread's context (saved into),
 *      a1 = heir thread's context (restored from).
 * Saves the callee-saved integer state plus SR and EPC into *a0, then
 * falls through to _CPU_Context_switch_restore to reload *a1.
 * Interrupts are masked for the duration by raising SR_EXL.
 * Offsets are 8-byte slots and must match Context_Control in cpu.h.
 */
FRAME(_CPU_Context_switch,sp,0,ra)

        mfc0 t0,C0_SR
        li t1,~SR_IE
        sd t0,C0_SR_OFFSET*8(a0)        /* save status register */
        and t0,t1
        mtc0 t0,C0_SR                   /* first disable ie bit (recommended) */
        ori t0,SR_EXL|SR_IE             /* enable exception level to disable interrupts */
        mtc0 t0,C0_SR

        sd ra,RA_OFFSET*8(a0)           /* save current context */
        sd sp,SP_OFFSET*8(a0)
        sd fp,FP_OFFSET*8(a0)
        sd s0,S0_OFFSET*8(a0)           /* BUGFIX: s0 was missing from the save list
                                           although the restore path reloads it, so the
                                           running thread's s0 was clobbered on switch */
        sd s1,S1_OFFSET*8(a0)
        sd s2,S2_OFFSET*8(a0)
        sd s3,S3_OFFSET*8(a0)
        sd s4,S4_OFFSET*8(a0)
        sd s5,S5_OFFSET*8(a0)
        sd s6,S6_OFFSET*8(a0)
        sd s7,S7_OFFSET*8(a0)
        dmfc0 t0,C0_EPC
        sd t0,C0_EPC_OFFSET*8(a0)

_CPU_Context_switch_restore:
        ld s0,S0_OFFSET*8(a1)           /* restore heir context */
        ld s1,S1_OFFSET*8(a1)
        ld s2,S2_OFFSET*8(a1)
        ld s3,S3_OFFSET*8(a1)
        ld s4,S4_OFFSET*8(a1)
        ld s5,S5_OFFSET*8(a1)
        ld s6,S6_OFFSET*8(a1)
        ld s7,S7_OFFSET*8(a1)
        ld fp,FP_OFFSET*8(a1)
        ld sp,SP_OFFSET*8(a1)
        ld ra,RA_OFFSET*8(a1)
        ld t0,C0_EPC_OFFSET*8(a1)
        dmtc0 t0,C0_EPC
        ld t0,C0_SR_OFFSET*8(a1)
        andi t0,SR_EXL
        bnez t0,_CPU_Context_1          /* heir had EXL set: leave it set */
        li t0,~SR_EXL                   /* (delay slot) mask to clear EXL */
        mfc0 t1,C0_SR
        nop
        and t1,t0
        mtc0 t1,C0_SR                   /* clear EXL: re-enable interrupts */
_CPU_Context_1:
        j ra
        nop
ENDFRAME(_CPU_Context_switch)
398
399#elif __mips == 1
400/* MIPS ISA Level 1 ( R3000 ) */
401
/* void _CPU_Context_switch( Context_Control *run, Context_Control *heir )
 *   -- MIPS ISA 1 (R3000)
 *
 * In:  a0 = running thread's context (saved into),
 *      a1 = heir thread's context (restored from).
 * Saves callee-saved integer state plus SR and EPC into *a0 with
 * interrupts disabled, then falls through to the restore label.
 * Offsets are 4-byte slots and must match Context_Control in cpu.h.
 */
FRAME(_CPU_Context_switch,sp,0,ra)

        mfc0 t0,C0_SR
        li t1,~SR_IEC
        sw t0,C0_SR_OFFSET*4(a0)        /* save status register */
        and t0,t1
        mtc0 t0,C0_SR                   /* first disable ie bit (recommended) */

        sw ra,RA_OFFSET*4(a0)           /* save current context */
        sw sp,SP_OFFSET*4(a0)
        sw fp,FP_OFFSET*4(a0)
        sw s0,S0_OFFSET*4(a0)
        sw s1,S1_OFFSET*4(a0)
        sw s2,S2_OFFSET*4(a0)
        sw s3,S3_OFFSET*4(a0)
        sw s4,S4_OFFSET*4(a0)
        sw s5,S5_OFFSET*4(a0)
        sw s6,S6_OFFSET*4(a0)
        sw s7,S7_OFFSET*4(a0)

        mfc0 t0,C0_EPC
        sw t0,C0_EPC_OFFSET*4(a0)

_CPU_Context_switch_restore:
        lw s0,S0_OFFSET*4(a1)           /* restore heir context */
        lw s1,S1_OFFSET*4(a1)
        lw s2,S2_OFFSET*4(a1)
        lw s3,S3_OFFSET*4(a1)
        lw s4,S4_OFFSET*4(a1)
        lw s5,S5_OFFSET*4(a1)
        lw s6,S6_OFFSET*4(a1)
        lw s7,S7_OFFSET*4(a1)
        lw fp,FP_OFFSET*4(a1)
        lw sp,SP_OFFSET*4(a1)
        lw ra,RA_OFFSET*4(a1)
        lw t0,C0_EPC_OFFSET*4(a1)
        mtc0 t0,C0_EPC
        lw t1, C0_SR_OFFSET*4(a1)
        mtc0 t1,C0_SR                   /* heir's saved SR re-enables ints if it had IEc set */

        /* Q:Changes needed to SR_IEC bit in SR/_CPU_Context_switch_restore? */

_CPU_Context_1:
        j ra
        nop
ENDFRAME(_CPU_Context_switch)
448
449#else
450
451   #error "__mips is not set to 1 or 3"
452
453#endif
454
455/*
456 *  _CPU_Context_restore
457 *
458 *  This routine is generally used only to restart self in an
459 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
460 *
461 *  NOTE: May be unnecessary to reload some registers.
462 */
463
464#if 0
465void _CPU_Context_restore(
466  Context_Control *new_context
467)
468{
469}
470#endif
471
472#if __mips == 3
473
/* void _CPU_Context_restore( Context_Control *new_context )  -- MIPS ISA 3
 *
 * Restarts the caller's thread by loading *a0 as the current context:
 * copies a0 into a1 (64-bit move) and jumps into the restore half of
 * _CPU_Context_switch.  Never returns to the caller's ra directly.
 */
FRAME(_CPU_Context_restore,sp,0,ra)
        dadd a1,a0,zero                 /* a1 = new_context */
        j _CPU_Context_switch_restore
        nop
ENDFRAME(_CPU_Context_restore)
479
480#elif __mips == 1
481
/* void _CPU_Context_restore( Context_Control *new_context )  -- MIPS ISA 1
 *
 * Restarts the caller's thread by loading *a0 as the current context:
 * copies a0 into a1 and jumps into the restore half of
 * _CPU_Context_switch.  Never returns to the caller's ra directly.
 */
FRAME(_CPU_Context_restore,sp,0,ra)
        add  a1,a0,zero                 /* a1 = new_context */
        j _CPU_Context_switch_restore
        nop
ENDFRAME(_CPU_Context_restore)
487
488#else
489
490      #error "__mips is not set to 1 or 3"
491
492#endif
493
494EXTERN(_ISR_Nest_level, SZ_INT)
495EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
496EXTERN(_Context_Switch_necessary,SZ_INT)
497EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
498.extern _Thread_Dispatch
499.extern _ISR_Vector_table
500
501/*  void __ISR_Handler()
502 *
503 *  This routine provides the RTEMS interrupt management.
504 *
505 */
506
507#if 0
508void _ISR_Handler()
509{
510   /*
511    *  This discussion ignores a lot of the ugly details in a real
512    *  implementation such as saving enough registers/state to be
513    *  able to do something real.  Keep in mind that the goal is
514    *  to invoke a user's ISR handler which is written in C and
515    *  uses a certain set of registers.
516    *
517    *  Also note that the exact order is to a large extent flexible.
518    *  Hardware will dictate a sequence for a certain subset of
519    *  _ISR_Handler while requirements for setting
520    */
521
522  /*
523   *  At entry to "common" _ISR_Handler, the vector number must be
524   *  available.  On some CPUs the hardware puts either the vector
525   *  number or the offset into the vector table for this ISR in a
526   *  known place.  If the hardware does not give us this information,
527   *  then the assembly portion of RTEMS for this port will contain
528   *  a set of distinct interrupt entry points which somehow place
529   *  the vector number in a known place (which is safe if another
530   *  interrupt nests this one) and branches to _ISR_Handler.
531   *
532   */
533#endif
534
535#if __mips == 3
536/* ----------------------------------------------------------------------------- */
/* _ISR_Handler  -- MIPS ISA 3 (R4xxx)
 *
 * Common interrupt entry.  Saves the caller-saved register set on the
 * stack, bumps _ISR_Nest_level and _Thread_Dispatch_disable_level,
 * dispatches through _ISR_Vector_table, possibly calls
 * _Thread_Dispatch, then restores and returns with eret (or to the
 * IDT/Kit monitor when USE_IDTKIT is set).
 */
FRAME(_ISR_Handler,sp,0,ra)
.set noreorder
#if USE_IDTKIT
/* IDT/Kit incorrectly adds 4 to EPC before returning.  This compensates */
        lreg    k0, R_EPC*R_SZ(sp)
        daddiu    k0,k0,-4
        sreg    k0, R_EPC*R_SZ(sp)
        lreg    k0, R_CAUSE*R_SZ(sp)
        li      k1, ~CAUSE_BD
        and     k0, k1
        sreg    k0, R_CAUSE*R_SZ(sp)
#endif

/* save registers not already saved by IDT/sim */
        stackadd sp,sp,-EXCP_STACK_SIZE /* allocate the exception register frame */

        sreg    ra, R_RA*R_SZ(sp)
        sreg    v0, R_V0*R_SZ(sp)
        sreg    v1, R_V1*R_SZ(sp)
        sreg    a0, R_A0*R_SZ(sp)
        sreg    a1, R_A1*R_SZ(sp)
        sreg    a2, R_A2*R_SZ(sp)
        sreg    a3, R_A3*R_SZ(sp)
        sreg    t0, R_T0*R_SZ(sp)
        sreg    t1, R_T1*R_SZ(sp)
        sreg    t2, R_T2*R_SZ(sp)
        sreg    t3, R_T3*R_SZ(sp)
        sreg    t4, R_T4*R_SZ(sp)
        sreg    t5, R_T5*R_SZ(sp)
        sreg    t6, R_T6*R_SZ(sp)
        sreg    t7, R_T7*R_SZ(sp)
        mflo    k0                      /* interleave HI/LO reads with stores */
        sreg    t8, R_T8*R_SZ(sp)
        sreg    k0, R_MDLO*R_SZ(sp)
        sreg    t9, R_T9*R_SZ(sp)
        mfhi    k0
        sreg    gp, R_GP*R_SZ(sp)
        sreg    fp, R_FP*R_SZ(sp)
        sreg    k0, R_MDHI*R_SZ(sp)
        .set noat
        sreg    AT, R_AT*R_SZ(sp)
        .set at

        stackadd sp,sp,-40              /* small call frame; ra kept at 32(sp) */
        sd ra,32(sp)

/* determine if an interrupt generated this exception */
        mfc0 k0,C0_CAUSE
        and k1,k0,CAUSE_EXCMASK
        bnez k1,_ISR_Handler_prom_exit  /* not an external interrupt, pass exception to Monitor */
        mfc0 k1,C0_SR
        and k0,k1
        and k0,CAUSE_IPMASK
        beq k0,zero,_ISR_Handler_quick_exit /* external interrupt not enabled, ignore */
        nop

  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */
#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
        lint t0,_ISR_Nest_level
        beq t0, zero,  _ISR_Handler_1
        nop
        /* switch stacks */
_ISR_Handler_1:
#else
        lint t0,_ISR_Nest_level
#endif
  /*
   *  _ISR_Nest_level++;
   */
        addi t0,t0,1
        sint t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        lint t1,_Thread_Dispatch_disable_level
        addi t1,t1,1
        sint t1,_Thread_Dispatch_disable_level
#if 0
        nop
        j _ISR_Handler_4
        nop
  /*
   *  while ( interrupts_pending(cause_reg) ) {
   *     vector = BITFIELD_TO_INDEX(cause_reg);
   *     (*_ISR_Vector_table[ vector ])( vector );
   *  }
   */
_ISR_Handler_2:
/* software interrupt priorities can be applied here */
        li t1,-1
/* convert bit field into interrupt index */
_ISR_Handler_3:
        andi t2,t0,1
        addi t1,1
        beql t2,zero,_ISR_Handler_3
        dsrl t0,1
        li t1,7
        dsll t1,3                       /* convert index to byte offset (*8) */
        la t3,_ISR_Vector_table
        intadd t1,t3
        lint t1,(t1)
        jalr t1
        nop
        j _ISR_Handler_5
        nop
_ISR_Handler_4:
        mfc0 t0,C0_CAUSE
        andi t0,CAUSE_IPMASK
        bne t0,zero,_ISR_Handler_2
        dsrl t0,t0,8
_ISR_Handler_5:
#else
        /* NOTE(review): vector index is hard-coded to 7 here -- presumably a
           board-specific shortcut; confirm against the BSP's interrupt wiring */
        nop
        li t1,7
        dsll t1,t1,SZ_INT_POW2          /* index -> byte offset into pointer table */
        la t3,_ISR_Vector_table
        intadd t1,t3
        lint t1,(t1)                    /* t1 = _ISR_Vector_table[7] */
        jalr t1
        nop
#endif
  /*
   *  --_ISR_Nest_level;
   */
        lint t2,_ISR_Nest_level
        addi t2,t2,-1
        sint t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        lint t1,_Thread_Dispatch_disable_level
        addi t1,t1,-1
        sint t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        nop
  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        lint t0,_Context_Switch_necessary
        lint t1,_ISR_Signals_to_thread_executing
        or t0,t0,t1
        beq t0,zero,_ISR_Handler_exit
        nop

  /*
   *  call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
   */
        jal _Thread_Dispatch
        nop
  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):
   *  prepare to get out of interrupt
   *  return from interrupt
   */
_ISR_Handler_exit:
        ld ra,32(sp)
        stackadd sp,sp,40               /* pop the small call frame */

/* restore interrupt context from stack */
        lreg    k0, R_MDLO*R_SZ(sp)
        mtlo    k0
        lreg    k0, R_MDHI*R_SZ(sp)
        lreg    a2, R_A2*R_SZ(sp)
        mthi    k0
        lreg    a3, R_A3*R_SZ(sp)
        lreg    t0, R_T0*R_SZ(sp)
        lreg    t1, R_T1*R_SZ(sp)
        lreg    t2, R_T2*R_SZ(sp)
        lreg    t3, R_T3*R_SZ(sp)
        lreg    t4, R_T4*R_SZ(sp)
        lreg    t5, R_T5*R_SZ(sp)
        lreg    t6, R_T6*R_SZ(sp)
        lreg    t7, R_T7*R_SZ(sp)
        lreg    t8, R_T8*R_SZ(sp)
        lreg    t9, R_T9*R_SZ(sp)
        lreg    gp, R_GP*R_SZ(sp)
        lreg    fp, R_FP*R_SZ(sp)
        lreg    ra, R_RA*R_SZ(sp)
        lreg    a0, R_A0*R_SZ(sp)
        lreg    a1, R_A1*R_SZ(sp)
        lreg    v1, R_V1*R_SZ(sp)
        lreg    v0, R_V0*R_SZ(sp)
        .set noat
        lreg    AT, R_AT*R_SZ(sp)
        .set at

        stackadd sp,sp,EXCP_STACK_SIZE  /* pop the exception register frame */

#if USE_IDTKIT
/* we handled exception, so return non-zero value */
        li v0,1
#endif

_ISR_Handler_quick_exit:
#ifdef USE_IDTKIT
        j ra
#else
        eret                            /* return from exception level */
#endif
        nop

_ISR_Handler_prom_exit:
#if __mips == 1
        la      k0, (R_VEC+((48)*8))
#endif

#if __mips == 3
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */
#endif
        j       k0                      /* chain to the PROM/monitor handler */
        nop

       .set    reorder

ENDFRAME(_ISR_Handler)
774
775/* ---------------------------------------------------------------------- */
776#elif __mips == 1
777/* MIPS ISA Level 1 */
778
/* _ISR_Handler  -- MIPS ISA 1 (R3000)
 *
 * Common interrupt entry.  Saves the caller-saved register set on the
 * stack, bumps _ISR_Nest_level and _Thread_Dispatch_disable_level,
 * polls each SR interrupt bit in priority order dispatching through
 * _ISR_Vector_table, possibly calls _Thread_Dispatch, then restores
 * state and returns via rfe.
 *
 * BUGFIX: the exit path used `ld ra,32(sp)` (a doubleword load, not a
 * MIPS-I instruction) although the matching store is `sw ra,32(sp)`;
 * it is now a matching `lw`.
 */
FRAME(_ISR_Handler,sp,0,ra)
.set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        addiu sp,sp,-EXCP_STACK_SIZE       /* allocate full exception frame */

        sw    ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        sw    v0, R_V0*R_SZ(sp)
        sw    v1, R_V1*R_SZ(sp)
        sw    a0, R_A0*R_SZ(sp)
        sw    a1, R_A1*R_SZ(sp)
        sw    a2, R_A2*R_SZ(sp)
        sw    a3, R_A3*R_SZ(sp)
        sw    t0, R_T0*R_SZ(sp)
        sw    t1, R_T1*R_SZ(sp)
        sw    t2, R_T2*R_SZ(sp)
        sw    t3, R_T3*R_SZ(sp)
        sw    t4, R_T4*R_SZ(sp)
        sw    t5, R_T5*R_SZ(sp)
        sw    t6, R_T6*R_SZ(sp)
        sw    t7, R_T7*R_SZ(sp)
        mflo    k0                      /* interleave HI/LO reads with stores */
        sw    t8, R_T8*R_SZ(sp)
        sw    k0, R_MDLO*R_SZ(sp)
        sw    t9, R_T9*R_SZ(sp)
        mfhi    k0
        sw    gp, R_GP*R_SZ(sp)
        sw    fp, R_FP*R_SZ(sp)
        sw    k0, R_MDHI*R_SZ(sp)
        .set noat
        sw    AT, R_AT*R_SZ(sp)
        .set at

        /* Q: Why hardcode -40 for stack add??? */
        /* This needs to be figured out.........*/
        addiu sp,sp,-40
        sw ra,32(sp)    /* store ra on the stack */

/* determine if an interrupt generated this exception */

        mfc0 k0,C0_CAUSE
        and k1,k0,CAUSE_EXCMASK
        beq k1, 0, _ISR_Handler_1       /* ExcCode == 0 => external interrupt */
        nop

_ISR_Handler_Exception:
        nop
        b  _ISR_Handler_Exception       /* true exception: spin here (no handler yet) */
        nop

_ISR_Handler_1:

        mfc0 k1,C0_SR
        and k0,k1
        and k0,CAUSE_IPMASK
        beq k0,zero,_ISR_Handler_exit /* external interrupt not enabled, ignore */
                                      /* but if it's not an exception or an interrupt, */
                                      /* then where did it come from??? */
        nop

  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */

  /*
   *  _ISR_Nest_level++;
   */
        lw   t0,_ISR_Nest_level
        addi t0,t0,1
        sw   t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        lw   t1,_Thread_Dispatch_disable_level
        addi t1,t1,1
        sw   t1,_Thread_Dispatch_disable_level

  /*
   *  while ( interrupts_pending(cause_reg) ) {
   *     vector = BITFIELD_TO_INDEX(cause_reg);
   *     (*_ISR_Vector_table[ vector ])( vector );
   *  }
   */
        /* k0 has the SR interrupt bits */
        la t3, _ISR_Vector_table

        /* The bits you look at can be prioritized here just by */
        /*  changing what bit is looked at. I.E. SR_IBITx */
        /* This code might become a loop, servicing all ints before returning.. */
        /*   Right now, it will go thru the whole list once */

        /* NOTE(review): `add t3, t1` accumulates -- if more than one bit is
         * pending, later vectors get a compounded offset; and `jal t3` jumps
         * *to* the table entry rather than loading a handler pointer from it
         * (contrast the __mips==3 path, which does `lint/jalr`).  Confirm
         * whether _ISR_Vector_table holds code stubs or function pointers.
         * Also k0/k1 are not preserved across the handler calls.            */
_ISR_check_bit_0:
        and k1, k0, SR_IBIT1
        beq k1, zero, _ISR_check_bit_1
        nop
        li  t1, ISR_VEC_SIZE*0
        add t3, t1
        jal t3
        nop
_ISR_check_bit_1:
        and k1, k0, SR_IBIT2
        beq k1, zero, _ISR_check_bit_2
        nop
        li  t1, ISR_VEC_SIZE*1
        add t3, t1
        jal t3
        nop
_ISR_check_bit_2:
        and k1, k0, SR_IBIT3
        beq k1, zero, _ISR_check_bit_3
        nop
        li  t1, ISR_VEC_SIZE*2
        add t3, t1
        jal t3
        nop
_ISR_check_bit_3:
        and k1, k0, SR_IBIT4
        beq k1, zero, _ISR_check_bit_4
        nop
        li  t1, ISR_VEC_SIZE*3
        add t3, t1
        jal t3
        nop
_ISR_check_bit_4:
        and k1, k0, SR_IBIT5
        beq k1, zero, _ISR_check_bit_5
        nop
        li  t1, ISR_VEC_SIZE*4
        add t3, t1
        jal t3
        nop
_ISR_check_bit_5:
        and k1, k0, SR_IBIT6
        beq k1, zero, _ISR_check_bit_6
        nop
        li  t1, ISR_VEC_SIZE*5
        add t3, t1
        jal t3
        nop
_ISR_check_bit_6:
        and k1, k0, SR_IBIT7
        beq k1, zero, _ISR_check_bit_7
        nop
        li  t1, ISR_VEC_SIZE*6
        add t3, t1
        jal t3
        nop
_ISR_check_bit_7:
        and k1, k0, SR_IBIT8
        beq k1, zero, _ISR_exit_int_check
        nop
        li  t1, ISR_VEC_SIZE*7
        add t3, t1
        jal t3
        nop

_ISR_exit_int_check:

  /*
   *  --_ISR_Nest_level;
   */
        lw   t2,_ISR_Nest_level
        addi t2,t2,-1
        sw   t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        lw   t1,_Thread_Dispatch_disable_level
        addi t1,t1,-1
        sw   t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        nop
  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        lw  t0,_Context_Switch_necessary
        lw  t1,_ISR_Signals_to_thread_executing
        or  t0,t0,t1
        beq t0,zero,_ISR_Handler_exit
        nop
  /*
   *  call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
   */
        jal _Thread_Dispatch
        nop
  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
        lw ra,32(sp)      /* BUGFIX: was `ld` (64-bit), store above is `sw` */
        addiu sp,sp,40    /* Q: Again with the 40...Is this needed? */

/* restore interrupt context from stack */

        lw    k0, R_MDLO*R_SZ(sp)
        mtlo  k0
        lw    k0, R_MDHI*R_SZ(sp)
        lw    a2, R_A2*R_SZ(sp)
        mthi  k0
        lw    a3, R_A3*R_SZ(sp)
        lw    t0, R_T0*R_SZ(sp)
        lw    t1, R_T1*R_SZ(sp)
        lw    t2, R_T2*R_SZ(sp)
        lw    t3, R_T3*R_SZ(sp)
        lw    t4, R_T4*R_SZ(sp)
        lw    t5, R_T5*R_SZ(sp)
        lw    t6, R_T6*R_SZ(sp)
        lw    t7, R_T7*R_SZ(sp)
        lw    t8, R_T8*R_SZ(sp)
        lw    t9, R_T9*R_SZ(sp)
        lw    gp, R_GP*R_SZ(sp)
        lw    fp, R_FP*R_SZ(sp)
        lw    ra, R_RA*R_SZ(sp)
        lw    a0, R_A0*R_SZ(sp)
        lw    a1, R_A1*R_SZ(sp)
        lw    v1, R_V1*R_SZ(sp)
        lw    v0, R_V0*R_SZ(sp)
        .set noat
        lw    AT, R_AT*R_SZ(sp)
        .set at

        addiu sp,sp,EXCP_STACK_SIZE

        rfe  /* Might not need to do RFE here... */
        j ra
        nop

       .set    reorder
ENDFRAME(_ISR_Handler)
1031
1032#else
1033
1034   #error "__mips is not set to 1 or 3 "
1035
1036#endif
1037
/* void mips_enable_interrupts( unsigned32 mask )
 *
 * In:  a0 = interrupt mask bits, already positioned for the status
 *      register.  Or's them into CP0 SR (can only enable, never disable).
 */
FRAME(mips_enable_interrupts,sp,0,ra)
        mfc0 t0,C0_SR           /* get status reg */
        nop
        or t0,t0,a0             /* set the requested mask bits */
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop
ENDFRAME(mips_enable_interrupts)
1046
/* unsigned32 mips_disable_interrupts( void )
 *
 * Clears all SR_IMASK interrupt-mask bits in CP0 SR.
 * Out: v0 = the previous SR_IMASK bits, suitable for a later
 *      mips_enable_interrupts() to restore them.
 */
FRAME(mips_disable_interrupts,sp,0,ra)
        mfc0 v0,C0_SR           /* get status reg */
        li t1,SR_IMASK          /* t1 = load interrupt mask word */
        not t0,t1               /* t0 = ~t1 */
        and t0,v0               /* clear imask bits */
        mtc0 t0,C0_SR           /* save status reg */
        and v0,t1               /* mask return value (only return imask bits) */
        jr ra
        nop
ENDFRAME(mips_disable_interrupts)
1057
1058#if __mips == 3
1059
/* void mips_enable_global_interrupts( void )  -- MIPS ISA 3
 * Sets the global SR_IE bit in CP0 SR. */
FRAME(mips_enable_global_interrupts,sp,0,ra)
        mfc0 t0,C0_SR           /* get status reg */
        nop
        ori t0,SR_IE            /* set global interrupt enable */
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop
ENDFRAME(mips_enable_global_interrupts)
1068
/* void mips_disable_global_interrupts( void )  -- MIPS ISA 3
 * Clears the global SR_IE bit in CP0 SR. */
FRAME(mips_disable_global_interrupts,sp,0,ra)
        li t1,SR_IE
        mfc0 t0,C0_SR           /* get status reg */
        not t1                  /* t1 = ~SR_IE */
        and t0,t1               /* clear global interrupt enable */
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop
ENDFRAME(mips_disable_global_interrupts)
1078
1079#elif __mips == 1
1080
/*
 *  Set the current interrupt enable bit (SR_IEC, MIPS I) in the status
 *  register, allowing any unmasked interrupts to be taken.
 *  Uses: t0
 */
FRAME(mips_enable_global_interrupts,sp,0,ra)
        mfc0 t0,C0_SR           /* get status reg */
        nop                     /* mfc0 hazard slot */
        ori t0,SR_IEC           /* set current interrupt enable */
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop                     /* branch delay slot */
ENDFRAME(mips_enable_global_interrupts)
1089
/*
 *  Clear the current interrupt enable bit (SR_IEC, MIPS I) in the
 *  status register, blocking all maskable interrupts.
 *  Uses: t0, t1
 */
FRAME(mips_disable_global_interrupts,sp,0,ra)
        li t1,SR_IEC            /* t1 = current interrupt enable bit */
        mfc0 t0,C0_SR           /* get status reg */
        not t1                  /* t1 = ~SR_IEC */
        and t0,t1               /* clear the enable bit */
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop                     /* branch delay slot */
ENDFRAME(mips_disable_global_interrupts)
1099
1100#else
1101
1102   #error "__mips is not set to 1 or 3"
1103
1104#endif
1105
/* return the value of the status register in v0.  Used for debugging */
FRAME(mips_get_sr,sp,0,ra)
        mfc0 v0,C0_SR           /* v0 = coprocessor 0 status register */
        j ra
        nop                     /* branch delay slot */
ENDFRAME(mips_get_sr)
1112
/*
 *  mips_break - stop execution via a break instruction (debugger trap).
 *  The active "#if 1" path loops back onto the break so execution cannot
 *  fall through if a debugger resumes past it; the disabled path would
 *  simply return to the caller.
 */
FRAME(mips_break,sp,0,ra)
#if 1
        break 0x0               /* trap to breakpoint/debug handler */
        j mips_break            /* if resumed, hit the break again */
#else
        j ra                    /* alternate: return to caller */
#endif
        nop                     /* branch delay slot (for either jump) */
ENDFRAME(mips_break)
1122
1123
1124/**************************************************************************
1125**
1126** enable_int(mask) - enables interrupts - mask is positioned so it only
1127**                      needs to be or'ed into the status reg. This
1128**                      also does some other things !!!! caution should
1129**                      be used if invoking this while in the middle
1130**                      of a debugging session where the client may have
1131**                      nested interrupts.
1132**
1133****************************************************************************/
FRAME(enable_int,sp,0,ra)
        .set    noreorder
        mfc0    t0,C0_SR                /* read status register */
        or      a0,1                    /* also force the global enable bit on (the
                                           "other things" the header comment warns about);
                                           doubles as the mfc0 hazard filler */
        or      t0,a0                   /* merge caller's mask into SR */
        mtc0    t0,C0_SR                /* write it back */
        j       ra
        nop                             /* branch delay slot */
        .set    reorder
ENDFRAME(enable_int)
1144
1145
1146/***************************************************************************
1147**
1148**      disable_int(mask) - disable the interrupt - mask is the complement
1149**                          of the bits to be cleared - i.e. to clear ext int
1150**                          5 the mask would be - 0xffff7fff
1151**
1152****************************************************************************/
/*
 *  disable_int(mask) - AND the status register with the caller's mask
 *  (a0), clearing the interrupt bits whose mask bits are zero.
 *  See the header comment above for the mask convention.
 *  Uses: t0
 */
FRAME(disable_int,sp,0,ra)
        .set    noreorder
        mfc0    t0,C0_SR                /* read status register */
        nop                             /* mfc0 hazard slot */
        and     t0,a0                   /* clear bits that are zero in the mask */
        mtc0    t0,C0_SR                /* write back updated status register */
        j       ra
        nop                             /* branch delay slot */
        .set    reorder                 /* restore assembler mode; previously leaked
                                           noreorder into following code (enable_int
                                           restores it, this routine did not) */
ENDFRAME(disable_int)
1162
1163
1164/*PAGE
1165 *
1166 *  _CPU_Internal_threads_Idle_thread_body
1167 *
1168 *  NOTES:
1169 *
1170 *  1. This is the same as the regular CPU independent algorithm.
1171 *
1172 *  2. If you implement this using a "halt", "idle", or "shutdown"
1173 *     instruction, then don't forget to put it in an infinite loop.
1174 *
1175 *  3. Be warned. Some processors with onboard DMA have been known
1176 *     to stop the DMA if the CPU were put in IDLE mode.  This might
1177 *     also be a problem with other on-chip peripherals.  So use this
1178 *     hook with caution.
1179 */
1180
1181#if __mips == 3
1182
FRAME(_CPU_Thread_Idle_body,sp,0,ra)
        wait                    /* enter low power mode */
        j _CPU_Thread_Idle_body /* loop forever; see DMA caution in notes above */
        nop                     /* branch delay slot */
ENDFRAME(_CPU_Thread_Idle_body)
1188
1189#elif __mips == 1
1190
FRAME(_CPU_Thread_Idle_body,sp,0,ra)
        nop                     /* no wait instruction on MIPS I -- just spin */
        j _CPU_Thread_Idle_body /* loop forever */
        nop                     /* branch delay slot */
ENDFRAME(_CPU_Thread_Idle_body)
1196
1197#else
1198
1199   #error "__mips not set to 1 or 3"
1200   
1201#endif
1202
1203/**************************************************************************
1204**
1205**      init_exc_vecs() - moves the exception code into the addresses
1206**                        reserved for exception vectors
1207**
1208**      UTLB Miss exception vector at address 0x80000000
1209**
1210**      General exception vector at address 0x80000080
1211**
1212**      RESET exception vector is at address 0xbfc00000
1213**
1214***************************************************************************/
1215
1216#define VEC_CODE_LENGTH 10*4
1217
FRAME(init_exc_vecs,sp,0,ra)

#if __mips == 1

        /*
         *  MIPS I: copy the UTLB-miss and general-exception stubs to
         *  their fixed vector addresses in one interleaved word-copy
         *  loop, then flush both vector ranges from the cache.
         */
        .set    noreorder
        la      t1,exc_utlb_code        /* src: UTLB miss stub */
        la      t2,exc_norm_code        /* src: general exception stub */
        li      t3,UT_VEC               /* dst: UTLB miss vector */
        li      t4,E_VEC                /* dst: general exception vector */
        li      t5,VEC_CODE_LENGTH      /* byte count for each copy */
1:
        lw      t6,0(t1)
        lw      t7,0(t2)
        sw      t6,0(t3)                /* copy one word to each vector */
        sw      t7,0(t4)
        addiu   t1,4
        addiu   t3,4
        addiu   t4,4
        subu    t5,4
        bne     t5,zero,1b
        addiu   t2,4                    /* branch delay slot: bump 2nd source */
        move    t5,ra           # assumes clear_cache doesn't use t5
        li      a0,UT_VEC
        jal     clear_cache          /* Check out clear cache.... */
        li      a1,VEC_CODE_LENGTH      /* delay slot: length argument */
        nop
        li      a0,E_VEC
        jal     clear_cache
        li      a1,VEC_CODE_LENGTH      /* delay slot: length argument */
        move    ra,t5           # restore ra
        j       ra
        nop
        .set    reorder

#elif __mips == 3

        /*
         *  MIPS III: copy each exception stub (TLB, XTLB, cache error,
         *  general) to its K1 (uncached) vector address, flushing the
         *  cache after each copy.  reorder mode: the assembler fills
         *  branch delay slots.
         */
        .set reorder
        move    t5,ra           # assumes clear_cache doesn't use t5

        /* TLB exception vector */
        la      t1,exc_tlb_code
        li      t2,T_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        li      a0,T_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        la      t1,exc_xtlb_code
        li      t2,X_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        /* extended TLB exception vector */
        li      a0,X_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        /* cache error exception vector */
        la      t1,exc_cache_code
        li      t2,C_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        li      a0,C_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        /* normal exception vector */
        la      t1,exc_norm_code
        li      t2,E_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        li      a0,E_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        move    ra,t5           # restore ra
        j       ra

#else
   #error "__mips not set to 1 or 3"
#endif

ENDFRAME(init_exc_vecs)
1329
/*
 *  UTLB-miss vector stub.  Per the 2000-11-30 ChangeLog entry this is a
 *  dummy added so applications link; it jumps to the generic handler.
 */
FRAME(exc_utlb_code,sp,0,ra)
        la      k0, _ISR_Handler /* XXX not right -- but need to link*/
        j       k0
        nop                     /* branch delay slot */
ENDFRAME(exc_utlb_code)
1335
/*
 *  General-exception vector stub: trampoline into _ISR_Handler via k0
 *  (kernel temporary, safe to clobber at exception entry).
 */
FRAME(exc_norm_code,sp,0,ra)
        la      k0, _ISR_Handler /* generic external int hndlr */
        j       k0
        nop                     /* branch delay slot */
ENDFRAME(exc_norm_code)
1341
1342/*
1343** Again, reliance on SIM. Not good.
1344*/
1345#if __mips == 3
1346
/* TLB exception stub: jump into the simulator monitor entry table */
FRAME(exc_tlb_code,sp,0,ra)
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim location */
        j       k0
        nop                     /* branch delay slot */
ENDFRAME(exc_tlb_code)
1352
/* extended (64-bit) TLB exception stub: same simulator monitor entry */
FRAME(exc_xtlb_code,sp,0,ra)
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim location */
        j       k0
        nop                     /* branch delay slot */

ENDFRAME(exc_xtlb_code)
1359
/* cache-error exception stub: same simulator monitor entry */
FRAME(exc_cache_code,sp,0,ra)
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim location */
        j       k0
        nop                     /* branch delay slot */
ENDFRAME(exc_cache_code)
1365
1366#elif __mips == 1
1367/* ------------------------------------------------------ */
/* MIPS I TLB exception stub: jumps to an IDT/SIM monitor entry */
FRAME(exc_tlb_code,sp,0,ra)
        la      k0, (R_VEC+((48)*8))  /* Need something else here besides IDT/SIM call */
        j       k0
        nop                     /* branch delay slot */
ENDFRAME(exc_tlb_code)
1373
/* MIPS I cache-error stub: same IDT/SIM monitor entry as exc_tlb_code */
FRAME(exc_cache_code,sp,0,ra)
        la      k0, (R_VEC+((48)*8))
        j       k0
        nop                     /* branch delay slot */
ENDFRAME(exc_cache_code)
1379
1380#else
1381
1382   #error "__mips is not set to 1 or 3"
1383
1384#endif
1385
Note: See TracBrowser for help on using the repository browser.