source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ a37b8f95

Last change on this file since a37b8f95 was a37b8f95, checked in by Joel Sherrill <joel.sherrill@…>, on 02/05/02 at 21:04:39

2002-02-05 Joel Sherrill <joel@…>

  • cpu_asm.S: Enhanced to save/restore more registers on exceptions.
  • rtems/score/cpu.h (CPU_Interrupt_frame): Enhanced to list every register individually and document when it is saved.
  • idtcpu.h: Added constants for the coprocessor 1 registers revision and status.
/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and added the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
 *          performance, tweaking this code and the ISR vectoring routines
 *          to reduce overhead & latencies.  Added optional
 *          instrumentation as well.
 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs integer-only tasks, interrupt
 *          levels and deferred FP contexts.
 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
 *          by increasing the amount of context saved/restored.
 *
 *  COPYRIGHT (c) 1989-2002.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 *  $Id$
 */

#include <asm.h>
#include "iregdef.h"
#include "idtcpu.h"

/* Enable debugging shadow writes to misc RAM; this is a vestigial
 * Mongoose-ism debug tool, but it may be handy in the future so we
 * left it in...
 */

#define INSTRUMENT_ISR_VECTORING
//#define INSTRUMENT_EXECUTING_THREAD


/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 (R4xxx)
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP
#define ADD     dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0
#define MTCO    dmtc0
#define ADDU    addu
#define ADDIU   addiu
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif __mips == 1
/* 32 bit register operations */
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)
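
/*
 * EXCP_STACK_SIZE is the size of the register frame _ISR_Handler pushes on the
 * stack below.  A rough C view of that frame (a sketch only; the authoritative
 * layout is given by the R_* indices from iregdef.h and by CPU_Interrupt_frame
 * in cpu.h, and each slot is R_SZ bytes wide):
 *
 *   typedef struct {
 *       unsigned32 slot[ NREGS ];   // indexed by R_A0, R_EPC, R_CAUSE, ...
 *   } exception_frame_sketch;
 */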


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
/* #define C0_EPC_OFFSET 12 */
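
/*
 * A minimal sketch, assuming cpu.h lays out Context_Control as one
 * register-sized slot per offset above (illustrative only; the real
 * Context_Control declaration in cpu.h is authoritative, and each slot
 * is R_SZ bytes wide):
 *
 *   typedef struct {
 *       unsigned32 s0, s1, s2, s3, s4, s5, s6, s7;   // S0_OFFSET .. S7_OFFSET
 *       unsigned32 sp;                               // SP_OFFSET
 *       unsigned32 fp;                               // FP_OFFSET
 *       unsigned32 ra;                               // RA_OFFSET
 *       unsigned32 c0_sr;                            // C0_SR_OFFSET
 *   } context_control_sketch;
 */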

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31
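
/*
 * The matching FP context, sketched in C under the assumption that cpu.h
 * declares Context_Control_fp as 32 consecutive floating point register
 * slots (illustrative only; each slot is F_SZ bytes wide):
 *
 *   typedef struct {
 *       unsigned32 fp[32];   // fp[n] lives at FPn_OFFSET * F_SZ
 *   } context_control_fp_sketch;
 */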


/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed, then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */
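
/*
 * A minimal usage sketch (hypothetical caller; the real call sites live in the
 * score's dispatch code, and the fp_context field of Thread_Control is assumed
 * here for illustration):
 *
 *   extern void _CPU_Context_save_fp( void **fp_context_ptr );
 *
 *   void save_fp_sketch( Thread_Control *executing )
 *   {
 *       // Passing the address of the pointer lets the routine update it if
 *       // the place the FP context is saved to ever changes.
 *       _CPU_Context_save_fp( &executing->fp_context );
 *   }
 */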

#if ( CPU_HARDWARE_FP == FALSE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noat

#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
        /*
        ** Make sure the FPU is on before we save state.  This code is here
        ** because the FPU context switch might occur when an integer
        ** task is switching out w/ an FP task switching in, but the current
        ** FPU state was left by a sometime previously scheduled FP task.
        **
        ** In non-deferred FP context switch, if the exiting task is FP, then
        ** the FPU is already on so we don't need to do this.
        */

        MFC0    t0,C0_SR
        li      k0,SR_CU1
        or      t0,k0
        MTC0    t0,C0_SR
#endif

        ld   a1,(a0)
        NOP
        .globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        swc1 $f0,FP0_OFFSET*F_SZ(a1)
        swc1 $f1,FP1_OFFSET*F_SZ(a1)
        swc1 $f2,FP2_OFFSET*F_SZ(a1)
        swc1 $f3,FP3_OFFSET*F_SZ(a1)
        swc1 $f4,FP4_OFFSET*F_SZ(a1)
        swc1 $f5,FP5_OFFSET*F_SZ(a1)
        swc1 $f6,FP6_OFFSET*F_SZ(a1)
        swc1 $f7,FP7_OFFSET*F_SZ(a1)
        swc1 $f8,FP8_OFFSET*F_SZ(a1)
        swc1 $f9,FP9_OFFSET*F_SZ(a1)
        swc1 $f10,FP10_OFFSET*F_SZ(a1)
        swc1 $f11,FP11_OFFSET*F_SZ(a1)
        swc1 $f12,FP12_OFFSET*F_SZ(a1)
        swc1 $f13,FP13_OFFSET*F_SZ(a1)
        swc1 $f14,FP14_OFFSET*F_SZ(a1)
        swc1 $f15,FP15_OFFSET*F_SZ(a1)
        swc1 $f16,FP16_OFFSET*F_SZ(a1)
        swc1 $f17,FP17_OFFSET*F_SZ(a1)
        swc1 $f18,FP18_OFFSET*F_SZ(a1)
        swc1 $f19,FP19_OFFSET*F_SZ(a1)
        swc1 $f20,FP20_OFFSET*F_SZ(a1)
        swc1 $f21,FP21_OFFSET*F_SZ(a1)
        swc1 $f22,FP22_OFFSET*F_SZ(a1)
        swc1 $f23,FP23_OFFSET*F_SZ(a1)
        swc1 $f24,FP24_OFFSET*F_SZ(a1)
        swc1 $f25,FP25_OFFSET*F_SZ(a1)
        swc1 $f26,FP26_OFFSET*F_SZ(a1)
        swc1 $f27,FP27_OFFSET*F_SZ(a1)
        swc1 $f28,FP28_OFFSET*F_SZ(a1)
        swc1 $f29,FP29_OFFSET*F_SZ(a1)
        swc1 $f30,FP30_OFFSET*F_SZ(a1)
        swc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed, then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == FALSE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat

        /*
        ** Make sure the FPU is on before we retrieve state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with an FP task switching in.
        */

        MFC0    t0,C0_SR
        li      k0,SR_CU1
        or      t0,k0
        MTC0    t0,C0_SR

        ld a1,(a0)
        NOP
        .globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        lwc1 $f0,FP0_OFFSET*F_SZ(a1)
        lwc1 $f1,FP1_OFFSET*F_SZ(a1)
        lwc1 $f2,FP2_OFFSET*F_SZ(a1)
        lwc1 $f3,FP3_OFFSET*F_SZ(a1)
        lwc1 $f4,FP4_OFFSET*F_SZ(a1)
        lwc1 $f5,FP5_OFFSET*F_SZ(a1)
        lwc1 $f6,FP6_OFFSET*F_SZ(a1)
        lwc1 $f7,FP7_OFFSET*F_SZ(a1)
        lwc1 $f8,FP8_OFFSET*F_SZ(a1)
        lwc1 $f9,FP9_OFFSET*F_SZ(a1)
        lwc1 $f10,FP10_OFFSET*F_SZ(a1)
        lwc1 $f11,FP11_OFFSET*F_SZ(a1)
        lwc1 $f12,FP12_OFFSET*F_SZ(a1)
        lwc1 $f13,FP13_OFFSET*F_SZ(a1)
        lwc1 $f14,FP14_OFFSET*F_SZ(a1)
        lwc1 $f15,FP15_OFFSET*F_SZ(a1)
        lwc1 $f16,FP16_OFFSET*F_SZ(a1)
        lwc1 $f17,FP17_OFFSET*F_SZ(a1)
        lwc1 $f18,FP18_OFFSET*F_SZ(a1)
        lwc1 $f19,FP19_OFFSET*F_SZ(a1)
        lwc1 $f20,FP20_OFFSET*F_SZ(a1)
        lwc1 $f21,FP21_OFFSET*F_SZ(a1)
        lwc1 $f22,FP22_OFFSET*F_SZ(a1)
        lwc1 $f23,FP23_OFFSET*F_SZ(a1)
        lwc1 $f24,FP24_OFFSET*F_SZ(a1)
        lwc1 $f25,FP25_OFFSET*F_SZ(a1)
        lwc1 $f26,FP26_OFFSET*F_SZ(a1)
        lwc1 $f27,FP27_OFFSET*F_SZ(a1)
        lwc1 $f28,FP28_OFFSET*F_SZ(a1)
        lwc1 $f29,FP29_OFFSET*F_SZ(a1)
        lwc1 $f30,FP30_OFFSET*F_SZ(a1)
        lwc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */
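
/*
 * A minimal usage sketch (hypothetical caller; in RTEMS the call is made from
 * the thread dispatch code, and the Registers field name of Thread_Control is
 * assumed here for illustration):
 *
 *   extern void _CPU_Context_switch( Context_Control *run, Context_Control *heir );
 *
 *   void dispatch_sketch( Thread_Control *executing, Thread_Control *heir )
 *   {
 *       // Saves the callee-saved state of "executing" and resumes "heir".
 *       _CPU_Context_switch( &executing->Registers, &heir->Registers );
 *   }
 */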

FRAME(_CPU_Context_switch,sp,0,ra)

        MFC0  t0,C0_SR
        li    t1,~(SR_INTERRUPT_ENABLE_BITS)
        STREG t0,C0_SR_OFFSET*R_SZ(a0)
        and   t0,t1
#if __mips == 3
        ori   t0,(SR_EXL|SR_IE)         /* enable exception level to disable interrupts */
#endif
        MTC0  t0,C0_SR

        STREG ra,RA_OFFSET*R_SZ(a0)     /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)

        /*  EPC is readonly...
        MFC0  t0,C0_EPC
        NOP
        STREG t0,C0_EPC_OFFSET*R_SZ(a0)
        */

_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)         /* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        /*  EPC is readonly...
        LDREG t0,C0_EPC_OFFSET*R_SZ(a1)
        NOP
        MTC0  t0,C0_EPC
        */

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

//      NOP
//#if __mips == 3
//        andi  t0,SR_EXL
//        bnez  t0,_CPU_Context_1   /* set exception level from restore context */
//        li    t0,~SR_EXL
//        MFC0  t1,C0_SR
//        NOP
//        and   t1,t0
//        MTC0  t1,C0_SR
//
//#elif __mips == 1
//
//        andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
//        beq   t0,$0,_CPU_Context_1          /* set level from restore context */
//        MFC0  t0,C0_SR
//        NOP
//        or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
//        MTC0  t0,C0_SR                      /* set with enabled */
//        NOP


/*
** Incorporate the new thread's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump thru the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the thread's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a thread
** must itself manipulate the SR register.
*/
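
/*
 * In C terms the status register merge below is roughly (a sketch only; MASK is
 * the ISA-appropriate set of bits loaded into k0 just below):
 *
 *   unsigned32 task_bits = task_sr    &  MASK;   // thread's FPU enable + int mask/enable
 *   unsigned32 globals   = current_sr & ~MASK;   // every other SR bit stays global
 *   unsigned32 new_sr    = globals | task_bits;  // value written back to C0_SR
 */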

#if __mips == 3
        li      k0,(SR_CU1 | SR_IMASK | SR_EXL | SR_IE)
#elif __mips == 1
        li      k0,(SR_CU1 | SR_IMASK | SR_IEC)
#endif
        and     t0,k0

        MFC0    t1,C0_SR        /* grab the current SR */
        not     k0              /* invert k0 so we can clear out the SR bits */
        and     t1,k0

        or      t0,t1           /* setup the new task's SR value */

        MTC0    t0,C0_SR        /* and load the new SR */
        NOP

/* _CPU_Context_1: */
        j ra
        NOP
ENDFRAME(_CPU_Context_switch)

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        ADD a1,a0,zero
        j   _CPU_Context_switch_restore
        NOP
ENDFRAME(_CPU_Context_restore)

ASM_EXTERN(_ISR_Nest_level, SZ_INT)
ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
ASM_EXTERN(_Thread_Executing,SZ_INT)
.extern _Thread_Dispatch
.extern _ISR_Vector_table

/*  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler while requirements for setting up to invoke the
 *  user's handler dictate the rest.
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branches to _ISR_Handler.
 *
 */
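
/*
 * Conceptually, once the vector number is known, dispatching to the user's
 * handler reduces to a table lookup (a sketch only; on this port the lookup is
 * performed by the BSP/CPU-model routines called from the code below):
 *
 *   extern rtems_isr_entry _ISR_Vector_table[];       // one entry per vector
 *
 *   static void vector_sketch( unsigned32 vector )
 *   {
 *       (*_ISR_Vector_table[ vector ])( vector );     // invoke the user's C handler
 *   }
 */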

FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU    sp,sp,-EXCP_STACK_SIZE

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
        mflo  t0
        STREG t8, R_T8*R_SZ(sp)
        STREG t0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  t0
        STREG gp, R_GP*R_SZ(sp)
        STREG t0, R_MDHI*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)
        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        MFC0     t0,C0_SR
        MFC0     t1,C0_EPC
        STREG    t0,R_SR*R_SZ(sp)
        STREG    t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw t2, _Thread_Executing
        nop
        sw t2, 0x8001FFF0
#endif

        /* determine if an interrupt generated this exception */

        MFC0     k0,C0_CAUSE
        NOP

        and      k1,k0,CAUSE_EXCMASK
        beq      k1, 0, _ISR_Handler_1

_ISR_Handler_Exception:

        /*  If we return from the exception, it is assumed nothing
         *  bad is going on and we can continue to run normally.
         *  But we want to save the entire CPU context so exception
         *  handlers can look at it and change it.
         *
         *  NOTE: This is the path the debugger stub will take.
         */

        STREG    sp,SP_OFFSET*R_SZ(sp)     /* save sp */

        STREG    s0,S0_OFFSET*R_SZ(sp)     /* save s0 - s7 */
        STREG    s1,S1_OFFSET*R_SZ(sp)
        STREG    s2,S2_OFFSET*R_SZ(sp)
        STREG    s3,S3_OFFSET*R_SZ(sp)
        STREG    s4,S4_OFFSET*R_SZ(sp)
        STREG    s5,S5_OFFSET*R_SZ(sp)
        STREG    s6,S6_OFFSET*R_SZ(sp)
        STREG    s7,S7_OFFSET*R_SZ(sp)

        MFC0     k0,C0_CAUSE               /* save cause */
        NOP
        STREG    k0,R_CAUSE*R_SZ(sp)

        /* CP0 special registers */

        MFC0     t0,C0_BADVADDR
        nop
        STREG    t0,R_BADVADDR*R_SZ(sp)

#if ( CPU_HARDWARE_FP == TRUE )
        MFC0     t0,C0_SR                 /* FPU is enabled, save state */
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 1f
        nop

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_save_fp_from_exception
        nop
        MFC1     t0,C1_REVISION
        MFC1     t1,C1_STATUS
        STREG    t0,R_FEIR*R_SZ(sp)
        STREG    t1,R_FCSR*R_SZ(sp)

1:
#endif
        move     a0,sp
        jal      mips_vector_exceptions
        nop

#if ( CPU_HARDWARE_FP == TRUE )
        MFC0     t0,C0_SR               /* FPU is enabled, restore state */
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 2f
        nop

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_restore_fp_from_exception
        nop
        LDREG    t0,R_FEIR*R_SZ(sp)
        LDREG    t1,R_FCSR*R_SZ(sp)
        MTC1     t0,C1_REVISION
        MTC1     t1,C1_STATUS
2:
#endif
        LDREG    s0,S0_OFFSET*R_SZ(sp)    /* restore s0 - s7 */
        LDREG    s1,S1_OFFSET*R_SZ(sp)
        LDREG    s2,S2_OFFSET*R_SZ(sp)
        LDREG    s3,S3_OFFSET*R_SZ(sp)
        LDREG    s4,S4_OFFSET*R_SZ(sp)
        LDREG    s5,S5_OFFSET*R_SZ(sp)
        LDREG    s6,S6_OFFSET*R_SZ(sp)
        LDREG    s7,S7_OFFSET*R_SZ(sp)

        /* do NOT restore the sp as this could mess up the world */
        /* do NOT restore the cause as this could mess up the world */

        j        _ISR_Handler_exit
        nop

_ISR_Handler_1:

        MFC0     k1,C0_SR
        and      k0,CAUSE_IPMASK
        and      k0,k1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* Then where did it come from??? */

        beq      k0,zero,_ISR_Handler_exit


  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */

  /*
   *  _ISR_Nest_level++;
   */
        LDREG  t0,_ISR_Nest_level
        NOP
        ADD    t0,t0,1
        STREG  t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,1
        STREG  t1,_Thread_Dispatch_disable_level

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */
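
  /*
   *  A minimal sketch of the sort of routine the BSP or CPU model supplies
   *  here (illustrative only; only the mips_vector_isr_handlers entry point
   *  itself is assumed by this file, and the helper names below are
   *  hypothetical):
   *
   *    void mips_vector_isr_handlers( CPU_Interrupt_frame *frame )
   *    {
   *        unsigned32 pending;
   *        int        bit;
   *
   *        // pending, enabled interrupts: Cause IP bits masked by the SR int mask
   *        pending = mips_get_cause_sketch() & mips_get_sr_sketch() & CAUSE_IPMASK;
   *
   *        // service the highest priority pending interrupt first; the mapping
   *        // of bit number to vector table index is simplified here
   *        for ( bit = 7; bit >= 0; bit-- )
   *            if ( pending & (0x100 << bit) )
   *                (*_ISR_Vector_table[ bit ])( bit );
   *    }
   */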

#ifdef INSTRUMENT_ISR_VECTORING
        nop
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move     a0,sp
        jal      mips_vector_isr_handlers
        nop

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        nop
#endif

  /*
   *  --_ISR_Nest_level;
   */
        LDREG  t2,_ISR_Nest_level
        NOP
        ADD    t2,t2,-1
        STREG  t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,-1
        STREG  t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        nop


  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        LDREG t0,_Context_Switch_necessary
        LDREG t1,_ISR_Signals_to_thread_executing
        NOP
        or    t0,t0,t1
        beq   t0,zero,_ISR_Handler_exit
        nop


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        nop
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering Thread_Dispatch which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the Thread_Dispatch_disable_level interlock
** that prevents recursive entry into Thread_Dispatch.
*/

        MFC0    t0, C0_SR
        NOP
        or      t0, SR_INTERRUPT_ENABLE_BITS
        MTC0    t0, C0_SR
        NOP

        jal     _Thread_Dispatch
        NOP

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        nop
        sw      t0,0x8001FFF8
#endif

  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register. _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changing, so all we need to do is return.
*/

        /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        nop
        sw      t0, 0x8001FFFC
#endif

        LDREG k0, R_MDLO*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        mtlo  k0
        LDREG k0, R_MDHI*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        mthi  k0
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)

        LDREG     k0, R_EPC*R_SZ(sp)

        .set noat
        LDREG     AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE
        j         k0
        rfe
        nop

        .set    reorder
ENDFRAME(_ISR_Handler)


FRAME(mips_break,sp,0,ra)
#if 1
        break 0x0
        j mips_break
#else
        j ra
#endif
        nop
ENDFRAME(mips_break)
834
Note: See TracBrowser for help on using the repository browser.