source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ e6dec71c

Last change on this file since e6dec71c was e6dec71c, checked in by Joel Sherrill <joel.sherrill@…>, on 02/01/02 at 15:00:30

2001-02-01 Greg Menke <gregory.menke@…>

  • cpu.c: Enhancements and fixes for modifying the SR when changing the interrupt level.
  • cpu_asm.S: Fixed handling of FP enable bit so it is properly managed on a per-task basis, improved handling of interrupt levels, and made deferred FP contexts work on the MIPS.
  • rtems/score/cpu.h: Modified to support above changes.
  • Property mode set to 100644
File size: 19.9 KB
/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and adding the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
 *          performance, tweaking this code and the ISR vectoring routines
 *          to reduce overhead and latencies.  Added optional
 *          instrumentation as well.
 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs. integer-only tasks, interrupt
 *          levels and deferred FP contexts.
 *
 *  COPYRIGHT (c) 1989-2000.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 *  $Id$
 */

#include <asm.h>
#include "iregdef.h"
#include "idtcpu.h"

/* Enable debugging shadow writes to misc RAM; this is a vestigial
 * Mongoose-ism debug tool, but it may be handy in the future so we
 * left it in...
 */

#define INSTRUMENT_ISR_VECTORING
//#define INSTRUMENT_EXECUTING_THREAD


/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP
#define ADD     dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0
#define MTCO    dmtc0
#define ADDU    addu
#define ADDIU   addiu
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif __mips == 1
/* 32 bit register operations */
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
/* #define C0_EPC_OFFSET 12 */

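/*
** For orientation only: a sketch of the Context_Control layout the offsets
** above assume.  The field names here are illustrative; cpu.h holds the
** authoritative definition and must stay in agreement with these constants.
**
**   typedef struct {
**       <register-sized> s0, s1, s2, s3, s4, s5, s6, s7;    offsets 0..7
**       <register-sized> sp, fp, ra;                        offsets 8..10
**       <register-sized> c0_sr;                             offset  11
**   } Context_Control;
**
** Each slot is R_SZ bytes wide, which is why the save/restore code below
** scales every offset by R_SZ.
*/
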
/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31


/*
 *  _CPU_Context_save_fp
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to the FP context area is
 *  changed, then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */

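/*
** Illustrative call site (an assumption about the usual RTEMS pattern, not
** something defined in this file): the dispatcher reaches this routine
** through the cpu.h mapping roughly as
**
**   _Context_Save_fp( &executing->fp_context );
**
** so a0 arrives holding the address of the thread's FP context pointer,
** and the code below dereferences it once (ld a1,(a0)) to reach the
** Context_Control_fp area itself.
*/
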
#if ( CPU_HARDWARE_FP == FALSE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noat

#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
        /*
        ** Make sure the FPU is on before we save state.  This code is here
        ** because the FPU context switch might occur when an integer
        ** task is switching out with an FP task switching in, but the
        ** current FPU state was left by some previously scheduled FP task.
        **
        ** In a non-deferred FP context switch, if the exiting task is FP,
        ** then the FPU is already on so we don't need to do this.
        */

        MFC0    t0,C0_SR
        li      k0,SR_CU1
        or      t0,k0
        MTC0    t0,C0_SR
#endif

        ld   a1,(a0)
        NOP
        swc1 $f0,FP0_OFFSET*F_SZ(a1)
        swc1 $f1,FP1_OFFSET*F_SZ(a1)
        swc1 $f2,FP2_OFFSET*F_SZ(a1)
        swc1 $f3,FP3_OFFSET*F_SZ(a1)
        swc1 $f4,FP4_OFFSET*F_SZ(a1)
        swc1 $f5,FP5_OFFSET*F_SZ(a1)
        swc1 $f6,FP6_OFFSET*F_SZ(a1)
        swc1 $f7,FP7_OFFSET*F_SZ(a1)
        swc1 $f8,FP8_OFFSET*F_SZ(a1)
        swc1 $f9,FP9_OFFSET*F_SZ(a1)
        swc1 $f10,FP10_OFFSET*F_SZ(a1)
        swc1 $f11,FP11_OFFSET*F_SZ(a1)
        swc1 $f12,FP12_OFFSET*F_SZ(a1)
        swc1 $f13,FP13_OFFSET*F_SZ(a1)
        swc1 $f14,FP14_OFFSET*F_SZ(a1)
        swc1 $f15,FP15_OFFSET*F_SZ(a1)
        swc1 $f16,FP16_OFFSET*F_SZ(a1)
        swc1 $f17,FP17_OFFSET*F_SZ(a1)
        swc1 $f18,FP18_OFFSET*F_SZ(a1)
        swc1 $f19,FP19_OFFSET*F_SZ(a1)
        swc1 $f20,FP20_OFFSET*F_SZ(a1)
        swc1 $f21,FP21_OFFSET*F_SZ(a1)
        swc1 $f22,FP22_OFFSET*F_SZ(a1)
        swc1 $f23,FP23_OFFSET*F_SZ(a1)
        swc1 $f24,FP24_OFFSET*F_SZ(a1)
        swc1 $f25,FP25_OFFSET*F_SZ(a1)
        swc1 $f26,FP26_OFFSET*F_SZ(a1)
        swc1 $f27,FP27_OFFSET*F_SZ(a1)
        swc1 $f28,FP28_OFFSET*F_SZ(a1)
        swc1 $f29,FP29_OFFSET*F_SZ(a1)
        swc1 $f30,FP30_OFFSET*F_SZ(a1)
        swc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to the FP context area is
 *  changed, then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * );
 */

#if ( CPU_HARDWARE_FP == FALSE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat

        /*
        ** Make sure the FPU is on before we retrieve state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with an FP task switching in.
        */

        MFC0    t0,C0_SR
        li      k0,SR_CU1
        or      t0,k0
        MTC0    t0,C0_SR

        ld a1,(a0)
        NOP
        lwc1 $f0,FP0_OFFSET*F_SZ(a1)
        lwc1 $f1,FP1_OFFSET*F_SZ(a1)
        lwc1 $f2,FP2_OFFSET*F_SZ(a1)
        lwc1 $f3,FP3_OFFSET*F_SZ(a1)
        lwc1 $f4,FP4_OFFSET*F_SZ(a1)
        lwc1 $f5,FP5_OFFSET*F_SZ(a1)
        lwc1 $f6,FP6_OFFSET*F_SZ(a1)
        lwc1 $f7,FP7_OFFSET*F_SZ(a1)
        lwc1 $f8,FP8_OFFSET*F_SZ(a1)
        lwc1 $f9,FP9_OFFSET*F_SZ(a1)
        lwc1 $f10,FP10_OFFSET*F_SZ(a1)
        lwc1 $f11,FP11_OFFSET*F_SZ(a1)
        lwc1 $f12,FP12_OFFSET*F_SZ(a1)
        lwc1 $f13,FP13_OFFSET*F_SZ(a1)
        lwc1 $f14,FP14_OFFSET*F_SZ(a1)
        lwc1 $f15,FP15_OFFSET*F_SZ(a1)
        lwc1 $f16,FP16_OFFSET*F_SZ(a1)
        lwc1 $f17,FP17_OFFSET*F_SZ(a1)
        lwc1 $f18,FP18_OFFSET*F_SZ(a1)
        lwc1 $f19,FP19_OFFSET*F_SZ(a1)
        lwc1 $f20,FP20_OFFSET*F_SZ(a1)
        lwc1 $f21,FP21_OFFSET*F_SZ(a1)
        lwc1 $f22,FP22_OFFSET*F_SZ(a1)
        lwc1 $f23,FP23_OFFSET*F_SZ(a1)
        lwc1 $f24,FP24_OFFSET*F_SZ(a1)
        lwc1 $f25,FP25_OFFSET*F_SZ(a1)
        lwc1 $f26,FP26_OFFSET*F_SZ(a1)
        lwc1 $f27,FP27_OFFSET*F_SZ(a1)
        lwc1 $f28,FP28_OFFSET*F_SZ(a1)
        lwc1 $f29,FP29_OFFSET*F_SZ(a1)
        lwc1 $f30,FP30_OFFSET*F_SZ(a1)
        lwc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */

FRAME(_CPU_Context_switch,sp,0,ra)

        MFC0  t0,C0_SR
        li    t1,~(SR_INTERRUPT_ENABLE_BITS)
        STREG t0,C0_SR_OFFSET*R_SZ(a0)
        and   t0,t1
#if __mips == 3
        ori   t0,(SR_EXL|SR_IE)         /* enable exception level to disable interrupts */
#endif
        MTC0  t0,C0_SR

        STREG ra,RA_OFFSET*R_SZ(a0)     /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)

        /*  EPC is read-only...
        MFC0  t0,C0_EPC
        NOP
        STREG t0,C0_EPC_OFFSET*R_SZ(a0)
        */

_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)         /* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        /*  EPC is read-only...
        LDREG t0,C0_EPC_OFFSET*R_SZ(a1)
        NOP
        MTC0  t0,C0_EPC
        */

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

//      NOP
//#if __mips == 3
//        andi  t0,SR_EXL
//        bnez  t0,_CPU_Context_1   /* set exception level from restore context */
//        li    t0,~SR_EXL
//        MFC0  t1,C0_SR
//        NOP
//        and   t1,t0
//        MTC0  t1,C0_SR
//
//#elif __mips == 1
//
//        andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
//        beq   t0,$0,_CPU_Context_1          /* set level from restore context */
//        MFC0  t0,C0_SR
//        NOP
//        or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
//        MTC0  t0,C0_SR                      /* set with enabled */
//        NOP


/*
** Incorporate the new thread's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump through the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the thread's FPU enable, int mask & int enable bits.  Although we keep
** the software int enables on a per-task basis, the rtems_task_create
** interrupt level & the int level manipulation functions cannot enable/disable
** them, so they are automatically enabled for all tasks.  To turn them off,
** a thread must itself manipulate the SR register.
*/

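/*
** Expressed as a formula, the code below computes (sketch only):
**
**   per_task_bits = SR_CU1 | interrupt mask/enable bits for this ISA
**   new SR        = (heir SR & per_task_bits) | (current SR & ~per_task_bits)
**
** so only the FPU enable and the interrupt mask/enable travel with each
** task; every other SR bit keeps its current, global value.
*/
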
#if __mips == 3
        li      k0,(SR_CU1 | SR_IMASK | SR_EXL | SR_IE)
#elif __mips == 1
        li      k0,(SR_CU1 | SR_IMASK | SR_IEC)
#endif
        and     t0,k0

        MFC0    t1,C0_SR        /* grab the current SR */
        not     k0              /* invert k0 so we can clear out the SR bits */
        and     t1,k0

        or      t0,t1           /* setup the new task's SR value */

        MTC0    t0,C0_SR        /* and load the new SR */
        NOP

/* _CPU_Context_1: */
        j ra
        NOP
ENDFRAME(_CPU_Context_switch)

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        ADD a1,a0,zero
        j   _CPU_Context_switch_restore
        NOP
ENDFRAME(_CPU_Context_restore)

ASM_EXTERN(_ISR_Nest_level, SZ_INT)
ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
ASM_EXTERN(_Thread_Executing,SZ_INT)
.extern _Thread_Dispatch
.extern _ISR_Vector_table

/*  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler while requirements for setting
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branch to _ISR_Handler.
 *
 */

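/*
** Rough shape of the handler below, in pseudocode (the assembly that
** follows is the authoritative version):
**
**   save the caller-saved registers, AT, gp, fp, hi/lo, SR and EPC on the stack
**   if CAUSE.ExcCode != 0:                       -- a true exception
**       mips_vector_exceptions( frame ); goto exit
**   if no enabled interrupt is pending:          goto exit
**   _ISR_Nest_level++; _Thread_Dispatch_disable_level++
**   mips_vector_isr_handlers( frame )
**   --_ISR_Nest_level; --_Thread_Dispatch_disable_level
**   if both counters are zero and a dispatch or signal is pending:
**       enable interrupts; _Thread_Dispatch()
**   exit:
**       restore the registers from the stack; return via EPC and rfe
*/
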
FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU    sp,sp,-EXCP_STACK_SIZE

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
        mflo  t0
        STREG t8, R_T8*R_SZ(sp)
        STREG t0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  t0
        STREG gp, R_GP*R_SZ(sp)
        STREG t0, R_MDHI*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)
        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        MFC0     t0,C0_SR
        MFC0     t1,C0_EPC
        STREG    t0,R_SR*R_SZ(sp)
        STREG    t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw t2, _Thread_Executing
        nop
        sw t2, 0x8001FFF0
#endif

        /* determine if an interrupt generated this exception */

        MFC0     k0,C0_CAUSE
        NOP

        and      k1,k0,CAUSE_EXCMASK
        beq      k1, 0, _ISR_Handler_1

_ISR_Handler_Exception:

        /* if we return from the exception, it is assumed nothing */
        /* bad is going on and we can continue to run normally */

        move     a0,sp
        jal      mips_vector_exceptions
        nop
        j        _ISR_Handler_exit
        nop

_ISR_Handler_1:

        MFC0     k1,C0_SR
        and      k0,CAUSE_IPMASK
        and      k0,k1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* then where did it come from??? */

        beq      k0,zero,_ISR_Handler_exit




  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */

  /*
   *  _ISR_Nest_level++;
   */
        LDREG  t0,_ISR_Nest_level
        NOP
        ADD    t0,t0,1
        STREG  t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,1
        STREG  t1,_Thread_Dispatch_disable_level

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

#ifdef INSTRUMENT_ISR_VECTORING
        nop
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move     a0,sp
        jal      mips_vector_isr_handlers
        nop

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        nop
#endif

  /*
   *  --_ISR_Nest_level;
   */
        LDREG  t2,_ISR_Nest_level
        NOP
        ADD    t2,t2,-1
        STREG  t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,-1
        STREG  t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        nop




  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        LDREG t0,_Context_Switch_necessary
        LDREG t1,_ISR_Signals_to_thread_executing
        NOP
        or    t0,t0,t1
        beq   t0,zero,_ISR_Handler_exit
        nop


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        nop
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering Thread_Dispatch, which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the Thread_Dispatch_disable_level interlock
** that prevents recursive entry into Thread_Dispatch.
*/

        MFC0    t0, C0_SR
        NOP
        or      t0, SR_INTERRUPT_ENABLE_BITS
        MTC0    t0, C0_SR
        NOP

        jal     _Thread_Dispatch
        NOP

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        nop
        sw      t0,0x8001FFF8
#endif


  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register.  _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changing, so all we need to do is return.
**
*/

        /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        nop
        sw      t0, 0x8001FFFC
#endif

        LDREG k0, R_MDLO*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        mtlo  k0
        LDREG k0, R_MDHI*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        mthi  k0
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)

        LDREG     k0, R_EPC*R_SZ(sp)

        .set noat
        LDREG     AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE
        j         k0
        rfe
        nop

       .set    reorder
ENDFRAME(_ISR_Handler)


FRAME(mips_break,sp,0,ra)
#if 1
        break 0x0
        j mips_break
#else
        j ra
#endif
        nop
ENDFRAME(mips_break)
