source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ 9165349d

/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and added the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
 *          performance, tweaking this code and the ISR vectoring routines
 *          to reduce overhead & latencies.  Added optional
 *          instrumentation as well.
 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs. int-only tasks, interrupt levels
 *          and deferred FP contexts.
 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
 *          by increasing the amount of context saved/restored.
 *    2004: March 24, Art Ferrer, NASA/GSFC, added save of the FP
 *          status/control register to fix an intermittent FP error
 *          encountered on the ST5 mission implementation on the
 *          Mongoose V processor.
 *    2004: April 7, Greg Menke <gregory.menke@gsfc.nasa.gov> Added __mips==32
 *          support for R4000 processors running 32 bit code.  Fixed #define
 *          problems that caused FPU code to always be included even when no
 *          FPU is present.
 *
 *  COPYRIGHT (c) 1989-2002.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>
#include <rtems/mips/iregdef.h>
#include <rtems/mips/idtcpu.h>
#include <rtems/score/percpu.h>

#define ASSEMBLY_ONLY
#include <rtems/score/cpu.h>

#if TRUE
#else
#error TRUE is not true
#endif
#if FALSE
#error FALSE is not false
#else
#endif

/*
#if ( CPU_HARDWARE_FP == TRUE )
#warning CPU_HARDWARE_FP == TRUE
#else
#warning CPU_HARDWARE_FP != TRUE
#endif
*/


/* enable debugging shadow writes to misc RAM; this is a vestigial
* Mongoose-ism debug tool, but may be handy in the future so we
* left it in...
*/

/* #define INSTRUMENT_ISR_VECTORING */
/* #define INSTRUMENT_EXECUTING_THREAD */



/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP     nop
#define ADD     dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0           /* Only use this op for coprocessor registers that are 64 bit in the R4000 architecture */
#define MTCO    dmtc0           /* Only use this op for coprocessor registers that are 64 bit in the R4000 architecture */
#define ADDU    addu
#define ADDIU   addiu
#if (__mips_fpr==32)
#define STREGC1 swc1
#define LDREGC1 lwc1
#elif (__mips_fpr==64)          /* Use these instructions if there are 64 bit floating point registers. This requires the FR bit to be set in C0_SR. */
#define STREGC1 sdc1
#define LDREGC1 ldc1
#endif
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif (__mips == 1 ) || (__mips == 32)
/* 32 bit register operations */
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define STREGC1 swc1
#define LDREGC1 lwc1
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12
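
/*
** Illustrative sketch only -- the authoritative definition lives in
** cpu.h.  These offsets imply a layout along these lines, with every
** member one register (R_SZ bytes) wide:
**
**   typedef struct {
**     reg s0, s1, s2, s3, s4, s5, s6, s7;   (callee-saved registers)
**     reg sp, fp, ra;                       (stack ptr, frame ptr, return)
**     reg c0_sr;                            (status register)
**     reg c0_epc;                           (exception PC)
**   } Context_Control;      (reg = an R_SZ-wide integer type)
**
** Worked example: on a 32 bit build (R_SZ == 4),
**   STREG ra,RA_OFFSET*R_SZ(a0)
** assembles to "sw ra,40(a0)", storing ra at byte offset 10 * 4 into
** the context pointed to by a0.
*/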

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31
#define FPCS_OFFSET 32
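
/*
** Illustrative note: the offsets above imply a Context_Control_fp that
** is simply 32 F_SZ-wide slots for $f0..$f31 followed by one slot for
** the FP status/control register (FPCS); e.g. with F_SZ == 4 the FPCS
** word lives at byte offset 32 * 4 = 128.  See cpu.h for the real
** structure.
*/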


ASM_EXTERN(__exceptionStackFrame, SZ_INT)

/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noreorder
        .set noat

        /*
        ** Make sure the FPU is on before we save state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
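
        /*
        ** In C terms, the instruction sequence below does roughly this
        ** (hedged sketch; get_sr()/set_sr() are illustrative stand-ins
        ** for the mfc0/mtc0 accesses, not functions defined anywhere):
        **
        **   unsigned sr = get_sr();
        **   unsigned saved_sr = sr;    (kept in t1 for the restore below)
        **   sr |= SR_CU1;              (turn the FPU on)
        **   sr &= ~SR_IE;              (mask interrupts; SR_IEC on MIPS1)
        **   set_sr(sr);
        */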
        mfc0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
        li      t2,SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        mtc0    t0,C0_SR

        lw      a1,(a0)         /* get address of context storage area */
        move    t0,ra
        jal     _CPU_Context_save_fp_from_exception
        NOP

        /*
        ** Reassert the task's state because we've not saved it yet.
        */
        mtc0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        STREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        STREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        STREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        STREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        STREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        STREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        STREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        STREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        STREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        STREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        STREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        STREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        STREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        STREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        STREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        STREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        STREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        STREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        STREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        STREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        STREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        STREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        STREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        STREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        STREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        STREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        STREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        STREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        STREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        STREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        STREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        STREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                    /* Read FP status/control reg */
        cfc1 a0,$31                    /* Two reads clear pipeline */
        NOP
        NOP
        sw a0, FPCS_OFFSET*F_SZ(a1)    /* Store value to FPCS location */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        .set noreorder

        /*
        ** Make sure the FPU is on before we retrieve state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        mfc0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
        li      t2,SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        mtc0    t0,C0_SR

        lw      a1,(a0)         /* get address of context storage area */
        move    t0,ra
        jal     _CPU_Context_restore_fp_from_exception
        NOP

        /*
        ** Reassert the old task's state because we've not restored the
        ** new one yet.
        */
        mtc0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        LDREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        LDREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        LDREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        LDREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        LDREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        LDREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        LDREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        LDREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        LDREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        LDREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        LDREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        LDREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        LDREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        LDREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        LDREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        LDREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        LDREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        LDREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        LDREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        LDREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        LDREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        LDREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        LDREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        LDREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        LDREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        LDREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        LDREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        LDREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        LDREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        LDREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        LDREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        LDREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                  /* Read from FP status/control reg */
        cfc1 a0,$31                  /* Two reads clear pipeline */
        NOP                          /* NOPs ensure execution */
        NOP
        lw a0,FPCS_OFFSET*F_SZ(a1)   /* Load saved FPCS value */
        NOP
        ctc1 a0,$31                  /* Restore FPCS register */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */

FRAME(_CPU_Context_switch,sp,0,ra)
        .set noreorder

        mfc0    t0,C0_SR
#if (__mips == 3) || (__mips == 32)
        li      t1,SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        STREG   t0,C0_SR_OFFSET*R_SZ(a0)        /* save the task's SR */
        not     t1
        and     t0,t1                           /* mask off interrupts while we context switch */
        mtc0    t0,C0_SR
        NOP

        STREG ra,RA_OFFSET*R_SZ(a0)             /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)


        /*
        ** This code grabs the userspace EPC if we're dispatching from
        ** an interrupt frame or supplies the address of the dispatch
        ** routine if not.  This is entirely for the gdbstub's benefit so
        ** it can know where each task is running.
        **
        ** Its value is only set when calling _Thread_Dispatch from
        ** the interrupt handler and is cleared immediately when this
        ** routine gets it.
        */
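
        /*
        ** As a rough C sketch of the lines below (illustration only):
        **
        **   if (__exceptionStackFrame != 0) {
        **       epc = __exceptionStackFrame[R_EPC];  (userspace EPC)
        **       __exceptionStackFrame = 0;
        **   } else {
        **       epc = &_Thread_Dispatch;             (switched out normally)
        **   }
        **   run->c0_epc = epc;     (run = the outgoing task's context, a0)
        */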

        la      t0,__exceptionStackFrame        /* see if we're coming in from an exception */
        LDREG   t1, (t0)
        NOP
        beqz    t1,1f

        STREG   zero, (t0)                      /* and clear it */
        NOP
        LDREG   t0,R_EPC*R_SZ(t1)               /* get the userspace EPC from the frame */
        b       2f
        NOP

1:      la      t0,_Thread_Dispatch             /* if ==0, we're switched out */

2:      STREG   t0,C0_EPC_OFFSET*R_SZ(a0)


_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)             /* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

/*      NOP */
/*#if (__mips == 3) || (__mips == 32) */
/*        andi  t0,SR_EXL */
/*        bnez  t0,_CPU_Context_1 */   /* set exception level from restore context */
/*        li    t0,~SR_EXL */
/*        MFC0  t1,C0_SR */
/*        NOP */
/*        and   t1,t0 */
/*        MTC0  t1,C0_SR */
/* */
/*#elif __mips == 1 */
/* */
/*        andi  t0,(SR_INTERRUPT_ENABLE_BITS) */ /* we know 0 disabled */
/*        beq   t0,$0,_CPU_Context_1  */         /* set level from restore context */
/*        MFC0  t0,C0_SR */
/*        NOP */
/*        or    t0,(SR_INTERRUPT_ENABLE_BITS) */ /* new_sr = old sr with enabled  */
/*        MTC0  t0,C0_SR */                     /* set with enabled */
/*        NOP */


/*
** Incorporate the incoming task's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump thru the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the task's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a task
** must itself manipulate the SR register.
**
** Although something of a hack on this processor, we treat the SR register
** int enables as the RTEMS interrupt level.  We use the int level
** value as a bitmask, not as any sort of greater than/less than metric.
** Manipulation of a task's interrupt level corresponds directly to manipulation
** of that task's SR bits, as seen in cpu.c
**
** Note, interrupts are disabled before context is saved, though the task's
** interrupt enable state is recorded.  The task swapping in will apply its
** specific SR bits, including interrupt enable.  If further task-specific
** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
** cpu.h task initialization code that will be affected.
*/
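
/*
** As a hedged C sketch, the merge below amounts to:
**
**   per_task = SR_CU1 | SR_IMASK | (SR_IE on MIPS3/32,
**                                   SR_IEC|SR_IEP|SR_IEO on MIPS1);
**   new_sr   = (current_sr & ~per_task) | (task_sr & per_task);
**
** Only the FPU enable, interrupt mask and interrupt enable bits are
** per-task; every other SR bit keeps its current, global value.
*/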

        li      t2,SR_CU1
        or      t2,SR_IMASK

        /* int enable bits */
#if (__mips == 3) || (__mips == 32)
        /*
        ** Save IE
        */
        or      t2,SR_IE
#elif __mips == 1
        /*
        ** Save current, previous & old int enables.  This is key because
        ** we can dispatch from within the stack frame used by an
        ** interrupt service.  The int enables nest, but not beyond
        ** previous and old because of the dispatch interlock seen
        ** in the interrupt processing code.
        */
        or      t2,SR_IEC + SR_IEP + SR_IEO
#endif
        and     t0,t2           /* keep only the per-task bits */

        mfc0    t1,C0_SR        /* grab the current SR */
        not     t2
        and     t1,t2           /* mask off the old task's per-task bits */
        or      t1,t0           /* or in the new task's bits */
        mtc0    t1,C0_SR        /* and load the new SR */
        NOP

/* _CPU_Context_1: */
        j       ra
        NOP
ENDFRAME(_CPU_Context_switch)


/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        .set noreorder
        move    a1,a0
        j       _CPU_Context_switch_restore
        NOP

ENDFRAME(_CPU_Context_restore)

.extern _Thread_Dispatch

/*  void _DBG_Handler()
 *
 *  This routine services the (at least) MIPS1 debug vector,
 *  only used by the hardware debugging features.  This code,
 *  while optional, is best located here because it's intrinsically
 *  associated with exceptions in general & thus tied pretty
 *  closely to _ISR_Handler.
 */
FRAME(_DBG_Handler,sp,0,ra)
        .set noreorder
        la      k0,_ISR_Handler
        j       k0
        NOP
        .set reorder
ENDFRAME(_DBG_Handler)

/*  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler while requirements for setting up the RTEMS state
 *  dictate the rest.
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branches to _ISR_Handler.
 *
 */
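
/*
** High-level flow of _ISR_Handler, as C-like pseudocode summarizing the
** assembly below (illustration only, not an API):
**
**   save caller-saved registers, SR and EPC to a frame on the stack;
**   if (CAUSE.ExcCode != 0) {                   (a genuine exception)
**       save the remaining context, and FP state if enabled;
**       mips_vector_exceptions(frame);          (gdb stub/user handler)
**       restore and leave via _ISR_Handler_exit;
**   }
**   if (!(CAUSE & SR & CAUSE_IPMASK)) goto _ISR_Handler_exit;
**   _ISR_Nest_level++;  _Thread_Dispatch_disable_level++;
**   mips_vector_isr_handlers(frame);            (BSP decodes & vectors)
**   _ISR_Nest_level--;  _Thread_Dispatch_disable_level--;
**   if (both counts == 0 && _Thread_Dispatch_necessary)
**       _Thread_Dispatch();
**   restore registers; return via eret (or j k1 + rfe on MIPS1);
*/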
630
631FRAME(_ISR_Handler,sp,0,ra)
[2e549dad]632        .set noreorder
633
634        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */
635
636        /* wastes a lot of stack space for context?? */
[7c99007]637        ADDIU    sp,sp,-EXCP_STACK_SIZE
[2e549dad]638
[5bb38e15]639        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
[2e549dad]640        STREG v0, R_V0*R_SZ(sp)
641        STREG v1, R_V1*R_SZ(sp)
642        STREG a0, R_A0*R_SZ(sp)
643        STREG a1, R_A1*R_SZ(sp)
644        STREG a2, R_A2*R_SZ(sp)
645        STREG a3, R_A3*R_SZ(sp)
646        STREG t0, R_T0*R_SZ(sp)
647        STREG t1, R_T1*R_SZ(sp)
648        STREG t2, R_T2*R_SZ(sp)
649        STREG t3, R_T3*R_SZ(sp)
650        STREG t4, R_T4*R_SZ(sp)
651        STREG t5, R_T5*R_SZ(sp)
652        STREG t6, R_T6*R_SZ(sp)
653        STREG t7, R_T7*R_SZ(sp)
[d26dce2]654        mflo  t0
[2e549dad]655        STREG t8, R_T8*R_SZ(sp)
[5bb38e15]656        STREG t0, R_MDLO*R_SZ(sp)
[2e549dad]657        STREG t9, R_T9*R_SZ(sp)
[d26dce2]658        mfhi  t0
[2e549dad]659        STREG gp, R_GP*R_SZ(sp)
[5bb38e15]660        STREG t0, R_MDHI*R_SZ(sp)
[c556d0ba]661        STREG fp, R_FP*R_SZ(sp)
[9b5f06cd]662
[2e549dad]663        .set noat
664        STREG AT, R_AT*R_SZ(sp)
665        .set at
[fda47cd]666
[7c99007]667        mfc0     t0,C0_SR
668        MFCO     t1,C0_EPC
[d26dce2]669        STREG    t0,R_SR*R_SZ(sp)
670        STREG    t1,R_EPC*R_SZ(sp)
[7c99007]671
[d26dce2]672
[e6dec71c]673#ifdef INSTRUMENT_EXECUTING_THREAD
[6d42b4c6]674        lw t2, THREAD_EXECUTING
[bd1ecb0]675        NOP
[d26dce2]676        sw t2, 0x8001FFF0
677#endif
[7c99007]678
[e6dec71c]679        /* determine if an interrupt generated this exception */
[fda47cd]680
[7c99007]681        mfc0     t0,C0_CAUSE
[c556d0ba]682        NOP
[d26dce2]683
[8264d23]684        and      t1,t0,CAUSE_EXCMASK
685        beq      t1, 0, _ISR_Handler_1
[fda47cd]686
687_ISR_Handler_Exception:
[d26dce2]688
[a37b8f95]689        /*  If we return from the exception, it is assumed nothing
690         *  bad is going on and we can continue to run normally.
691         *  But we want to save the entire CPU context so exception
692         *  handlers can look at it and change it.
693         *
694         *  NOTE: This is the path the debugger stub will take.
695         */
696
[8264d23]697        /* already got t0 = cause in the interrupt test above */
698        STREG    t0,R_CAUSE*R_SZ(sp)
[a37b8f95]699
[8264d23]700        STREG    sp, R_SP*R_SZ(sp)
[9b5f06cd]701
[8264d23]702        STREG    s0,R_S0*R_SZ(sp)     /* save s0 - s7 */
703        STREG    s1,R_S1*R_SZ(sp)
704        STREG    s2,R_S2*R_SZ(sp)
705        STREG    s3,R_S3*R_SZ(sp)
706        STREG    s4,R_S4*R_SZ(sp)
707        STREG    s5,R_S5*R_SZ(sp)
708        STREG    s6,R_S6*R_SZ(sp)
709        STREG    s7,R_S7*R_SZ(sp)
[a37b8f95]710
711        /* CP0 special registers */
712
[25d3d4d]713#if __mips == 1
[7c99007]714        mfc0     t0,C0_TAR
[25d3d4d]715#endif
[7c99007]716        MFCO     t1,C0_BADVADDR
[9b5f06cd]717
[25d3d4d]718#if __mips == 1
[bd1ecb0]719        STREG    t0,R_TAR*R_SZ(sp)
[25d3d4d]720#else
721        NOP
722#endif
[bd1ecb0]723        STREG    t1,R_BADVADDR*R_SZ(sp)
[9b5f06cd]724
[a37b8f95]725#if ( CPU_HARDWARE_FP == TRUE )
[7c99007]726        mfc0     t0,C0_SR                 /* FPU is enabled, save state */
[bd1ecb0]727        NOP
[a37b8f95]728        srl      t0,t0,16
729        andi     t0,t0,(SR_CU1 >> 16)
730        beqz     t0, 1f
[bd1ecb0]731        NOP
[5bb38e15]732
[a37b8f95]733        la       a1,R_F0*R_SZ(sp)
[5bb38e15]734        jal      _CPU_Context_save_fp_from_exception
[bd1ecb0]735        NOP
[7c99007]736        mfc1     t0,C1_REVISION
737        mfc1     t1,C1_STATUS
[a37b8f95]738        STREG    t0,R_FEIR*R_SZ(sp)
739        STREG    t1,R_FCSR*R_SZ(sp)
[5bb38e15]740
7411:
[a37b8f95]742#endif
[9b5f06cd]743
[d26dce2]744        move     a0,sp
745        jal      mips_vector_exceptions
[bd1ecb0]746        NOP
[a37b8f95]747
[9b5f06cd]748
[5bb38e15]749        /*
[5194a28]750        ** Note, if the exception vector returns, rely on it to have
[8264d23]751        ** adjusted EPC so we will return to some correct address.  If
[5bb38e15]752        ** this is not done, we might get stuck in an infinite loop because
[8264d23]753        ** we'll return to the instruction where the exception occured and
754        ** it could throw again.
755        **
756        ** It is expected the only code using the exception processing is
757        ** either the gdb stub or some user code which is either going to
[5e39823]758        ** panic or do something useful.  Regardless, it is up to each
759        ** exception routine to properly adjust EPC, so the code below
760        ** may be helpful for doing just that.
[8264d23]761        */

/* *********************************************************************
** this code follows the R3000's exception return logic, but is not
** needed because the gdb stub does it for us.  It might be useful
** for something else at some point...
**
        * compute the address of the instruction we'll return to *

        LDREG   t1, R_CAUSE*R_SZ(sp)
        LDREG   t0, R_EPC*R_SZ(sp)

        * first see if the exception happened in the delay slot *
        li      t3,CAUSE_BD
        AND     t4,t1,t3
        beqz    t4,excnodelay
        NOP

        * it did, now see if the branch occurred or not *
        li      t3,CAUSE_BT
        AND     t4,t1,t3
        beqz    t4,excnobranch
        NOP

        * branch was taken, we resume at the branch target *
        LDREG   t0, R_TAR*R_SZ(sp)
        j       excreturn
        NOP

excnobranch:
        ADDU    t0,R_SZ

excnodelay:
        ADDU    t0,R_SZ

excreturn:
        STREG   t0, R_EPC*R_SZ(sp)
        NOP
********************************************************************* */


 /* if we're returning into mips_break, move to the next instruction */

        LDREG   t0,R_EPC*R_SZ(sp)
        la      t1,mips_break
        xor     t2,t0,t1
        bnez    t2,3f

        addu    t0,R_SZ
        STREG   t0,R_EPC*R_SZ(sp)
        NOP
3:


#if ( CPU_HARDWARE_FP == TRUE )
        mfc0     t0,C0_SR               /* FPU is enabled, restore state */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 2f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_restore_fp_from_exception
        NOP
        LDREG    t0,R_FEIR*R_SZ(sp)
        LDREG    t1,R_FCSR*R_SZ(sp)
        mtc1     t0,C1_REVISION
        mtc1     t1,C1_STATUS
2:
#endif
        LDREG    s0,R_S0*R_SZ(sp)    /* restore s0 - s7 */
        LDREG    s1,R_S1*R_SZ(sp)
        LDREG    s2,R_S2*R_SZ(sp)
        LDREG    s3,R_S3*R_SZ(sp)
        LDREG    s4,R_S4*R_SZ(sp)
        LDREG    s5,R_S5*R_SZ(sp)
        LDREG    s6,R_S6*R_SZ(sp)
        LDREG    s7,R_S7*R_SZ(sp)

        /* do NOT restore the sp as this could mess up the world */
        /* do NOT restore the cause as this could mess up the world */

        /*
        ** Jump all the way out.  If there's a pending interrupt, just
        ** let it be serviced later.  Since we're probably using the
        ** gdb stub, we've already disrupted the ISR service timing
        ** anyhow.  We oughtn't mix exception and interrupt processing
        ** in the same exception call in case the exception stuff
        ** might interfere with the dispatching & timer ticks.
        */
        j        _ISR_Handler_exit
        NOP

_ISR_Handler_1:

        mfc0     t1,C0_SR
        and      t0,CAUSE_IPMASK
        and      t0,t1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* Then where did it come from??? */

        beq      t0,zero,_ISR_Handler_exit
        NOP


  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  if ( _ISR_Nest_level == 0 )
   *    switch to software interrupt stack
   */


  /*
   *  _ISR_Nest_level++;
   */
        lw      t0,ISR_NEST_LEVEL
        NOP
        add     t0,t0,1
        sw      t0,ISR_NEST_LEVEL
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        lw      t1,THREAD_DISPATCH_DISABLE_LEVEL
        NOP
        add     t1,t1,1
        sw      t1,THREAD_DISPATCH_DISABLE_LEVEL

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

#ifdef INSTRUMENT_ISR_VECTORING
        NOP
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move     a0,sp
        jal      mips_vector_isr_handlers
        NOP

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        NOP
#endif

  /*
   *  --_ISR_Nest_level;
   */
        lw      t2,ISR_NEST_LEVEL
        NOP
        add     t2,t2,-1
        sw      t2,ISR_NEST_LEVEL
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        lw      t1,THREAD_DISPATCH_DISABLE_LEVEL
        NOP
        add     t1,t1,-1
        sw      t1,THREAD_DISPATCH_DISABLE_LEVEL
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        NOP


  /*
   *  restore stack
   *
   *  if !_Thread_Dispatch_necessary
   *    goto the label "exit interrupt (simple case)"
   */
        lbu     t0,DISPATCH_NEEDED
        NOP
        or      t0,t0,t0
        beq     t0,zero,_ISR_Handler_exit
        NOP


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,THREAD_EXECUTING
        NOP
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering Thread_Dispatch which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the Thread_Dispatch_disable_level interlock
** that prevents recursive entry into Thread_Dispatch.
*/
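
/*
** In outline, the code from here down to the context restore does
** (illustrative pseudocode only):
**
**   enable_interrupts();           (allow nesting while dispatching)
**   __exceptionStackFrame = sp;    (let _CPU_Context_switch find us)
**   _Thread_Dispatch();
**   __exceptionStackFrame = 0;     (clear it if we never switched)
**   disable_interrupts();          (quiesce for the restore below)
*/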

        mfc0    t0, C0_SR
#if __mips == 1

        li      t1,SR_IEC
        or      t0, t1

#elif (__mips == 3) || (__mips == 32)

        /*
        ** clear EXL and set IE so we can get interrupts.
        */
        li      t1, SR_EXL
        not     t1
        and     t0,t1
        or      t0, SR_IE

#endif
        mtc0    t0, C0_SR
        NOP

        /* save off our stack frame so the context switcher can get to it */
        la      t0,__exceptionStackFrame
        STREG   sp,(t0)

        jal     _Thread_Dispatch
        NOP

        /*
        ** And make sure it's clear in case we didn't dispatch.  If we
        ** did, it's already cleared.
        */
        la      t0,__exceptionStackFrame
        STREG   zero,(t0)
        NOP

/*
** turn interrupts back off while we restore context so
** a badly timed interrupt won't mess things up
*/
        mfc0    t0, C0_SR

#if __mips == 1

        /* ints off, current & prev kernel mode on (kernel mode enabled is bit clear..argh!) */
        li      t1,SR_IEC | SR_KUP | SR_KUC
        not     t1
        and     t0, t1
        mtc0    t0, C0_SR
        NOP

#elif (__mips == 3) || (__mips == 32)

        /* make sure EXL and IE are set so ints are disabled & we can update EPC for the return */
        li   t1,SR_IE           /* Clear IE first (recommended) */
        not  t1
        and  t0,t1
        mtc0 t0,C0_SR
        NOP

        /* apply task's SR with EXL set so the eret will return properly */
        or      t0, SR_EXL | SR_IE
        mtc0    t0, C0_SR
        NOP

        /* store new EPC value, which is safe now that EXL is set */
        LDREG   t0, R_EPC*R_SZ(sp)
        NOP
        MTCO    t0, C0_EPC
        NOP

#endif

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,THREAD_EXECUTING
        NOP
        sw      t0,0x8001FFF8
#endif


  /*
   *  prepare to get out of interrupt
   *  return from interrupt
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register.  _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changed, so all we need to do is return.
*/
        /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,THREAD_EXECUTING
        NOP
        sw      t0, 0x8001FFFC
#endif

        LDREG t8, R_MDLO*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        mtlo  t8
        LDREG t8, R_MDHI*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        mthi  t8
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)

#if __mips == 1
        LDREG     k1, R_EPC*R_SZ(sp)
#endif

        .set noat
        LDREG     AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE

#if (__mips == 3) || (__mips == 32)
        eret
#elif __mips == 1
        j         k1
        rfe
#endif
        NOP

       .set    reorder
ENDFRAME(_ISR_Handler)


FRAME(mips_break,sp,0,ra)
        .set noreorder
        break   0x0     /* this statement must be first in this function, assumed so by mips-stub.c */
        NOP
        j       ra
        NOP
       .set    reorder
ENDFRAME(mips_break)
