source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ b72e847b

/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and added the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
 *          performance, tweaking this code and the isr vectoring routines
 *          to reduce overhead & latencies.  Added optional
 *          instrumentation as well.
 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
 *          and deferred FP contexts.
 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
 *          by increasing the amount of context saved/restored.
 *    2004: March 24, Art Ferrer, NASA/GSFC, added save of FP status/control
 *          register to fix intermittent FP error encountered on ST5 mission
 *          implementation on Mongoose V processor.
 *    2004: April 7, Greg Menke <gregory.menke@gsfc.nasa.gov> Added __mips==32
 *          support for R4000 processors running 32 bit code.  Fixed #define
 *          problems that caused fpu code to always be included even when no
 *          fpu is present.
 *
 *  COPYRIGHT (c) 1989-2002.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#include <rtems/asm.h>
#include <rtems/mips/iregdef.h>
#include <rtems/mips/idtcpu.h>

#define ASSEMBLY_ONLY
#include <rtems/score/cpu.h>

#if TRUE
#else
#error TRUE is not true
#endif
#if FALSE
#error FALSE is not false
#else
#endif

/*
#if ( CPU_HARDWARE_FP == TRUE )
#warning CPU_HARDWARE_FP == TRUE
#else
#warning CPU_HARDWARE_FP != TRUE
#endif
*/


/* enable debugging shadow writes to misc ram, this is a vestigial
* Mongoose-ism debug tool - but may be handy in the future so we
* left it in...
*/

/* #define INSTRUMENT_ISR_VECTORING */
/* #define INSTRUMENT_EXECUTING_THREAD */


/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP     nop
#define ADD     dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0           /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
#define MTCO    dmtc0           /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
#define ADDU    addu
#define ADDIU   addiu
#if (__mips_fpr==32)
#define STREGC1 swc1
#define LDREGC1 lwc1
#elif (__mips_fpr==64)          /* Use these instructions if there are 64 bit floating point registers. This requires FR bit to be set in C0_SR */
#define STREGC1 sdc1
#define LDREGC1 ldc1
#endif
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif (__mips == 1 ) || (__mips == 32)
/* 32 bit register operations */
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define STREGC1 swc1
#define LDREGC1 lwc1
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)

#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12
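
/*
** Hedged sketch (illustrative only; cpu.h holds the authoritative
** definition): the offsets above imply a layout along these lines,
** each member R_SZ bytes wide ("reg_t" is a placeholder name):
**
**   typedef struct {
**       reg_t s0, s1, s2, s3, s4, s5, s6, s7;  // callee-saved registers
**       reg_t sp, fp, ra;                      // stack ptr, frame ptr, return addr
**       reg_t c0_sr;                           // saved status register
**       reg_t c0_epc;                          // resume address (for the gdb stub)
**   } Context_Control;
**
** The *_OFFSET values index this struct in the STREG/LDREG sequences
** of _CPU_Context_switch below.
*/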

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31
#define FPCS_OFFSET 32
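
/*
** Similarly hedged sketch: Context_Control_fp is expected to be the 32
** FP registers followed by one slot for the FP control/status register
** ("freg_t" is a placeholder name; each slot is F_SZ bytes):
**
**   typedef struct {
**       freg_t fp[32];   // $f0..$f31, saved via STREGC1/LDREGC1
**       freg_t fpcs;     // FP control/status word, read via cfc1 $31
**   } Context_Control_fp;
*/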

ASM_EXTERN(__exceptionStackFrame, SZ_INT)


/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */
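
/*
** Hedged illustration of the alternative mentioned above (names are
** hypothetical, not from cpu.h): a macro variant could do the double
** dereference in C,
**
**   #define _CPU_Context_save_fp( _fp_context_ptr ) \
**       _CPU_Context_save_fp_body( *(_fp_context_ptr) )
**
** handing the assembly body a (Context_Control_fp *) directly.  This
** port instead takes the (void **) itself; the "lw a1,(a0)" below is
** the dereference.
*/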

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noreorder
        .set noat

        /*
        ** Make sure the FPU is on before we save state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        mfc0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
        li      t2,SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        mtc0    t0,C0_SR

        lw      a1,(a0)         /* get address of context storage area */
        move    t0,ra
        jal     _CPU_Context_save_fp_from_exception
        NOP

        /*
        ** Reassert the task's state because we've not saved it yet.
        */
        mtc0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        STREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        STREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        STREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        STREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        STREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        STREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        STREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        STREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        STREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        STREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        STREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        STREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        STREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        STREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        STREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        STREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        STREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        STREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        STREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        STREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        STREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        STREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        STREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        STREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        STREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        STREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        STREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        STREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        STREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        STREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        STREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        STREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                    /* Read FP status/control reg */
        cfc1 a0,$31                    /* Two reads clear pipeline */
        NOP
        NOP
        sw a0, FPCS_OFFSET*F_SZ(a1)    /* Store value to FPCS location */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        .set noreorder

        /*
        ** Make sure the FPU is on before we retrieve state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        mfc0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
        li      t2,SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        mtc0    t0,C0_SR

        lw      a1,(a0)         /* get address of context storage area */
        move    t0,ra
        jal     _CPU_Context_restore_fp_from_exception
        NOP

        /*
        ** Reassert the old task's state because we've not restored the
        ** new one yet.
        */
        mtc0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        LDREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        LDREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        LDREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        LDREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        LDREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        LDREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        LDREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        LDREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        LDREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        LDREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        LDREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        LDREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        LDREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        LDREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        LDREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        LDREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        LDREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        LDREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        LDREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        LDREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        LDREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        LDREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        LDREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        LDREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        LDREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        LDREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        LDREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        LDREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        LDREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        LDREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        LDREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        LDREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                  /* Read from FP status/control reg */
        cfc1 a0,$31                  /* Two reads clear pipeline */
        NOP                          /* NOPs ensure execution */
        NOP
        lw a0,FPCS_OFFSET*F_SZ(a1)   /* Load saved FPCS value */
        NOP
        ctc1 a0,$31                  /* Restore FPCS register */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */

FRAME(_CPU_Context_switch,sp,0,ra)
        .set noreorder

        mfc0    t0,C0_SR
#if (__mips == 3) || (__mips == 32)
        li      t1,SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        STREG   t0,C0_SR_OFFSET*R_SZ(a0)        /* save the task's SR */
        not     t1
        and     t0,t1                           /* mask off interrupts while we context switch */
        mtc0    t0,C0_SR
        NOP

        STREG ra,RA_OFFSET*R_SZ(a0)             /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)


        /*
        ** This code grabs the userspace EPC if we're dispatching from
        ** an interrupt frame, or supplies the address of the dispatch
        ** routine if not.  This is entirely for the gdb stub's benefit so
        ** it can know where each task is running.
        **
        ** Its value is only set when calling _Thread_Dispatch from
        ** the interrupt handler and is cleared immediately when this
        ** routine gets it.
        */

        la      t0,__exceptionStackFrame        /* see if we're coming in from an exception */
        LDREG   t1, (t0)
        NOP
        beqz    t1,1f

        STREG   zero, (t0)                      /* and clear it */
        NOP
        LDREG   t0,R_EPC*R_SZ(t1)               /* get the userspace EPC from the frame */
        b       2f
        NOP

1:      la      t0,_Thread_Dispatch             /* if ==0, we're switched out */

2:      STREG   t0,C0_EPC_OFFSET*R_SZ(a0)

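/*
** The above in C terms (sketch; field names are illustrative):
**
**   if ( __exceptionStackFrame != 0 ) {     // dispatching from an ISR frame
**       epc = frame[R_EPC];                 // the interrupted task's PC
**       __exceptionStackFrame = 0;          // consume the one-shot flag
**   } else {
**       epc = &_Thread_Dispatch;            // task was switched out normally
**   }
**   run->c0_epc = epc;                      // purely for the gdb stub
*/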

_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)             /* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

/*      NOP */
/*#if (__mips == 3) || (__mips == 32) */
/*        andi  t0,SR_EXL */
/*        bnez  t0,_CPU_Context_1 */   /* set exception level from restore context */
/*        li    t0,~SR_EXL */
/*        MFC0  t1,C0_SR */
/*        NOP */
/*        and   t1,t0 */
/*        MTC0  t1,C0_SR */
/* */
/*#elif __mips == 1 */
/* */
/*        andi  t0,(SR_INTERRUPT_ENABLE_BITS) */ /* we know 0 disabled */
/*        beq   t0,$0,_CPU_Context_1  */         /* set level from restore context */
/*        MFC0  t0,C0_SR */
/*        NOP */
/*        or    t0,(SR_INTERRUPT_ENABLE_BITS) */ /* new_sr = old sr with enabled  */
/*        MTC0  t0,C0_SR */                     /* set with enabled */
/*        NOP */

/*
** Incorporate the incoming task's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump thru the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the task's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a task
** must itself manipulate the SR register.
**
** Although something of a hack on this processor, we treat the SR register
** int enables as the RTEMS interrupt level.  We use the int level
** value as a bitmask, not as any sort of greater than/less than metric.
** Manipulation of a task's interrupt level corresponds directly to manipulation
** of that task's SR bits, as seen in cpu.c
**
** Note, interrupts are disabled before context is saved, though the task's
** interrupt enable state is recorded.  The task swapping in will apply its
** specific SR bits, including interrupt enable.  If further task-specific
** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
** cpu.h task initialization code that will be affected.
*/
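
/*
** In C terms, the merge below computes (sketch; PER_TASK_BITS is not a
** real macro in this file, just shorthand for the t2 mask built below):
**
**   PER_TASK_BITS = SR_CU1 | SR_IMASK | <int enable bits for this ISA>;
**   new_sr = (mfc0(C0_SR) & ~PER_TASK_BITS) | (task_sr & PER_TASK_BITS);
**
** i.e. only the FPU enable, interrupt mask and interrupt enable bits are
** per-task; every other SR bit keeps its current global value.
*/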

        li      t2,SR_CU1
        or      t2,SR_IMASK

        /* int enable bits */
#if (__mips == 3) || (__mips == 32)
        /*
        ** Save IE
        */
        or      t2,SR_IE
#elif __mips == 1
        /*
        ** Save current, previous & old int enables.  This is key because
        ** we can dispatch from within the stack frame used by an
        ** interrupt service.  The int enables nest, but not beyond
        ** previous and old because of the dispatch interlock seen
        ** in the interrupt processing code.
        */
        or      t2,SR_IEC + SR_IEP + SR_IEO
#endif
        and     t0,t2           /* keep only the per-task bits */

        mfc0    t1,C0_SR        /* grab the current SR */
        not     t2
        and     t1,t2           /* mask off the old task's per-task bits */
        or      t1,t0           /* or in the new task's bits */
        mtc0    t1,C0_SR        /* and load the new SR */
        NOP

/* _CPU_Context_1: */
        j       ra
        NOP
ENDFRAME(_CPU_Context_switch)


/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        .set noreorder
        move    a1,a0
        j       _CPU_Context_switch_restore
        NOP

ENDFRAME(_CPU_Context_restore)


ASM_EXTERN(_ISR_Nest_level,4)
ASM_EXTERN(_Thread_Dispatch_disable_level,4)
ASM_EXTERN(_Context_Switch_necessary,4)
ASM_EXTERN(_ISR_Signals_to_thread_executing,4)
ASM_EXTERN(_Thread_Executing,4)

.extern _Thread_Dispatch
.extern _ISR_Vector_table


/*  void _DBG_Handler()
 *
 *  This routine services the (at least) MIPS1 debug vector,
 *  only used by the hardware debugging features.  This code,
 *  while optional, is best located here because it's intrinsically
 *  associated with exceptions in general & thus tied pretty
 *  closely to _ISR_Handler.
 *
 */


FRAME(_DBG_Handler,sp,0,ra)
        .set noreorder
        la      k0,_ISR_Handler
        j       k0
        NOP
        .set reorder
ENDFRAME(_DBG_Handler)


/*  void __ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  void _ISR_Handler()
 *
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler while requirements for setting
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branches to _ISR_Handler.
 *
 */
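
/*
 *  Summary of this port's _ISR_Handler flow, matching the code below:
 *
 *    save caller-saved regs plus SR/EPC on an EXCP_STACK_SIZE stack frame
 *    if CAUSE.EXCMASK != 0 (a real exception): -> _ISR_Handler_Exception
 *        save the full context, call mips_vector_exceptions, restore, exit
 *    else (an interrupt):                      -> _ISR_Handler_1
 *        _ISR_Nest_level++; _Thread_Dispatch_disable_level++
 *        call mips_vector_isr_handlers to fan out to device ISRs
 *        --_ISR_Nest_level; --_Thread_Dispatch_disable_level
 *        if both are zero and _Context_Switch_necessary or
 *        _ISR_Signals_to_thread_executing is set:
 *            re-enable interrupts and call _Thread_Dispatch
 *    restore regs and eret (MIPS3/32) or j k1 + rfe (MIPS1)
 */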

FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU    sp,sp,-EXCP_STACK_SIZE

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
        mflo  t0
        STREG t8, R_T8*R_SZ(sp)
        STREG t0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  t0
        STREG gp, R_GP*R_SZ(sp)
        STREG t0, R_MDHI*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)

        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        mfc0     t0,C0_SR
        MFCO     t1,C0_EPC
        STREG    t0,R_SR*R_SZ(sp)
        STREG    t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw t2, _Thread_Executing
        NOP
        sw t2, 0x8001FFF0
#endif

        /* determine if an interrupt generated this exception */

        mfc0     t0,C0_CAUSE
        NOP

        and      t1,t0,CAUSE_EXCMASK
        beq      t1, 0, _ISR_Handler_1

_ISR_Handler_Exception:

        /*  If we return from the exception, it is assumed nothing
         *  bad is going on and we can continue to run normally.
         *  But we want to save the entire CPU context so exception
         *  handlers can look at it and change it.
         *
         *  NOTE: This is the path the debugger stub will take.
         */

        /* already got t0 = cause in the interrupt test above */
        STREG    t0,R_CAUSE*R_SZ(sp)

        STREG    sp, R_SP*R_SZ(sp)

        STREG    s0,R_S0*R_SZ(sp)     /* save s0 - s7 */
        STREG    s1,R_S1*R_SZ(sp)
        STREG    s2,R_S2*R_SZ(sp)
        STREG    s3,R_S3*R_SZ(sp)
        STREG    s4,R_S4*R_SZ(sp)
        STREG    s5,R_S5*R_SZ(sp)
        STREG    s6,R_S6*R_SZ(sp)
        STREG    s7,R_S7*R_SZ(sp)

        /* CP0 special registers */

#if __mips == 1
        mfc0     t0,C0_TAR
#endif
        MFCO     t1,C0_BADVADDR

#if __mips == 1
        STREG    t0,R_TAR*R_SZ(sp)
#else
        NOP
#endif
        STREG    t1,R_BADVADDR*R_SZ(sp)

#if ( CPU_HARDWARE_FP == TRUE )
        mfc0     t0,C0_SR                 /* FPU is enabled, save state */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 1f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_save_fp_from_exception
        NOP
        cfc1     t0,C1_REVISION           /* FP control regs, so cfc1 not mfc1 */
        cfc1     t1,C1_STATUS
        STREG    t0,R_FEIR*R_SZ(sp)
        STREG    t1,R_FCSR*R_SZ(sp)

1:
#endif

        move     a0,sp
        jal      mips_vector_exceptions
        NOP


        /*
        ** Note, if the exception vector returns, rely on it to have
        ** adjusted EPC so we will return to some correct address.  If
        ** this is not done, we might get stuck in an infinite loop because
        ** we'll return to the instruction where the exception occurred and
        ** it could throw again.
        **
        ** It is expected the only code using the exception processing is
        ** either the gdb stub or some user code which is either going to
        ** panic or do something useful.  Regardless, it is up to each
        ** exception routine to properly adjust EPC, so the code below
        ** may be helpful for doing just that.
        */

/* *********************************************************************
** this code follows the R3000's exception return logic, but is not
** needed because the gdb stub does it for us.  It might be useful
** for something else at some point...
**
        * compute the address of the instruction we'll return to *

        LDREG   t1, R_CAUSE*R_SZ(sp)
        LDREG   t0, R_EPC*R_SZ(sp)

        * first see if the exception happened in the delay slot *
        li      t3,CAUSE_BD
        AND     t4,t1,t3
        beqz    t4,excnodelay
        NOP

        * it did, now see if the branch occurred or not *
        li      t3,CAUSE_BT
        AND     t4,t1,t3
        beqz    t4,excnobranch
        NOP

        * branch was taken, we resume at the branch target *
        LDREG   t0, R_TAR*R_SZ(sp)
        j       excreturn
        NOP

excnobranch:
        ADDU    t0,R_SZ

excnodelay:
        ADDU    t0,R_SZ

excreturn:
        STREG   t0, R_EPC*R_SZ(sp)
        NOP
********************************************************************* */


 /* if we're returning into mips_break, move to the next instruction */

        LDREG   t0,R_EPC*R_SZ(sp)
        la      t1,mips_break
        xor     t2,t0,t1
        bnez    t2,3f

        addu    t0,R_SZ
        STREG   t0,R_EPC*R_SZ(sp)
        NOP
3:


#if ( CPU_HARDWARE_FP == TRUE )
        mfc0     t0,C0_SR               /* FPU is enabled, restore state */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 2f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_restore_fp_from_exception
        NOP
        LDREG    t0,R_FEIR*R_SZ(sp)
        LDREG    t1,R_FCSR*R_SZ(sp)
        ctc1     t0,C1_REVISION         /* FP control regs, so ctc1 not mtc1 */
        ctc1     t1,C1_STATUS
2:
#endif
        LDREG    s0,R_S0*R_SZ(sp)    /* restore s0 - s7 */
        LDREG    s1,R_S1*R_SZ(sp)
        LDREG    s2,R_S2*R_SZ(sp)
        LDREG    s3,R_S3*R_SZ(sp)
        LDREG    s4,R_S4*R_SZ(sp)
        LDREG    s5,R_S5*R_SZ(sp)
        LDREG    s6,R_S6*R_SZ(sp)
        LDREG    s7,R_S7*R_SZ(sp)

        /* do NOT restore the sp as this could mess up the world */
        /* do NOT restore the cause as this could mess up the world */

        /*
        ** Jump all the way out.  If there's a pending interrupt, just
        ** let it be serviced later.  Since we're probably using the
        ** gdb stub, we've already disrupted the ISR service timing
        ** anyhow.  We oughtn't mix exception and interrupt processing
        ** in the same exception call in case the exception stuff
        ** might interfere with the dispatching & timer ticks.
        */
        j        _ISR_Handler_exit
        NOP

_ISR_Handler_1:

        mfc0     t1,C0_SR
        and      t0,CAUSE_IPMASK
        and      t0,t1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* then where did it come from??? */

        beq      t0,zero,_ISR_Handler_exit
        NOP

  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */

  /*
   *  _ISR_Nest_level++;
   */
        lw      t0,_ISR_Nest_level
        NOP
        add     t0,t0,1
        sw      t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        lw      t1,_Thread_Dispatch_disable_level
        NOP
        add     t1,t1,1
        sw      t1,_Thread_Dispatch_disable_level

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

#ifdef INSTRUMENT_ISR_VECTORING
        NOP
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move     a0,sp
        jal      mips_vector_isr_handlers
        NOP

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        NOP
#endif

  /*
   *  --_ISR_Nest_level;
   */
        lw      t2,_ISR_Nest_level
        NOP
        add     t2,t2,-1
        sw      t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        lw      t1,_Thread_Dispatch_disable_level
        NOP
        add     t1,t1,-1
        sw      t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        NOP


  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        lw      t0,_Context_Switch_necessary
        lw      t1,_ISR_Signals_to_thread_executing
        NOP
        or      t0,t0,t1
        beq     t0,zero,_ISR_Handler_exit
        NOP


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering Thread_Dispatch which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the Thread_Dispatch_disable_level interlock
** that prevents recursive entry into Thread_Dispatch.
*/
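
/*
** Sketch of that interlock in C terms: _Thread_Dispatch raises
** _Thread_Dispatch_disable_level for the duration of its work, so an
** interrupt taken while it runs sees a nonzero level at the
** "--_Thread_Dispatch_disable_level / if nonzero goto exit" test above
** and simply returns rather than recursing into the dispatcher.
*/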

        mfc0    t0, C0_SR
#if __mips == 1

        li      t1,SR_IEC
        or      t0, t1

#elif (__mips == 3) || (__mips == 32)

        /*
        ** clear EXL and set IE so we can get interrupts.
        */
        li      t1, SR_EXL
        not     t1
        and     t0,t1
        or      t0, SR_IE

#endif
        mtc0    t0, C0_SR
        NOP

        /* save off our stack frame so the context switcher can get to it */
        la      t0,__exceptionStackFrame
        STREG   sp,(t0)

        jal     _Thread_Dispatch
        NOP

        /*
        ** And make sure it's clear in case we didn't dispatch.  If we did,
        ** it's already cleared.
        */
        la      t0,__exceptionStackFrame
        STREG   zero,(t0)
        NOP

/*
** turn interrupts back off while we restore context so
** a badly timed interrupt won't mess things up
*/
        mfc0    t0, C0_SR

#if __mips == 1

        /* ints off, current & prev kernel mode on (kernel mode enabled is bit clear..argh!) */
        li      t1,SR_IEC | SR_KUP | SR_KUC
        not     t1
        and     t0, t1
        mtc0    t0, C0_SR
        NOP

#elif (__mips == 3) || (__mips == 32)

        /* make sure EXL and IE are set so ints are disabled & we can update EPC for the return */
        li   t1,SR_IE           /* Clear IE first (recommended) */
        not  t1
        and  t0,t1
        mtc0 t0,C0_SR
        NOP

        /* apply task's SR with EXL set so the eret will return properly */
        or      t0, SR_EXL | SR_IE
        mtc0    t0, C0_SR
        NOP

        /* store new EPC value, which is safe now that EXL is set and no
           interrupt can overwrite it before the eret */
        LDREG   t0, R_EPC*R_SZ(sp)
        NOP
        MTCO    t0, C0_EPC
        NOP

#endif

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF8
#endif


  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register.  _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changed, so all we need to do is return.
**
*/
        /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0, 0x8001FFFC
#endif

        LDREG t8, R_MDLO*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        mtlo  t8
        LDREG t8, R_MDHI*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        mthi  t8
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)

#if __mips == 1
        LDREG     k1, R_EPC*R_SZ(sp)
#endif

        .set noat
        LDREG     AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE

#if (__mips == 3) || (__mips == 32)
        eret
#elif __mips == 1
        j         k1
        rfe
#endif
        NOP

       .set    reorder
ENDFRAME(_ISR_Handler)



FRAME(mips_break,sp,0,ra)
        .set noreorder
        break   0x0     /* this statement must be first in this function, assumed so by mips-stub.c */
        NOP
        j       ra
        NOP
       .set    reorder
ENDFRAME(mips_break)