source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ 0c9eaef

Last change on this file since 0c9eaef was 0c9eaef, checked in by Joel Sherrill <joel.sherrill@…>, on 04/03/04 at 16:29:13

2004-04-03 Art Ferrer <arturo.b.ferrer@…>

PR 598/bsps

  • cpu_asm.S, rtems/score/cpu.h: Add save of floating point status/control register on context switches. Missing this register was causing intermittent floating point errors.
  • Property mode set to 100644
File size: 28.9 KB
1/*
2 *  This file contains the basic algorithms for all assembly code used
3 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
4 *  in assembly language.
5 *
6 *  History:
7 *    Baseline: no_cpu
8 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
9 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
10 *          To anyone who acknowledges that the modifications to this file to
11 *          port it to the MIPS64ORION are provided "AS IS" without any
12 *          express or implied warranty:
13 *             permission to use, copy, modify, and distribute this file
14 *             for any purpose is hereby granted without fee, provided that
15 *             the above copyright notice and this notice appears in all
16 *             copies, and that the name of Transition Networks not be used in
17 *             advertising or publicity pertaining to distribution of the
18 *             software without specific, written prior permission. Transition
19 *             Networks makes no representations about the suitability
20 *             of this software for any purpose.
21 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
22 *          the baseline of the more general MIPS port. 
23 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
24 *          rewriting as much as possible in C and added the JMR3904 BSP
25 *          so testing could be performed on a simulator.
26 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
27 *          performance, tweaking this code and the isr vectoring routines
28 *          to reduce overhead & latencies.  Added optional
29 *          instrumentation as well.
30 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
31 *          cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
32 *          and deferred FP contexts.
33 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
34 *          by increasing the amount of context saved/restored.
35 *    2004: 24 March, Art Ferrer, NASA/GSFC, added save of FP status/control
36 *          register to fix intermittent FP error encountered on ST5 mission
37 *          implementation on Mongoose V processor.
38 * 
39 *  COPYRIGHT (c) 1989-2002.
40 *  On-Line Applications Research Corporation (OAR).
41 *
42 *  The license and distribution terms for this file may be
43 *  found in the file LICENSE in this distribution or at
44 *  http://www.rtems.com/license/LICENSE.
45 *
46 *  $Id$
47 */
48
49#include <rtems/asm.h>
50#include <rtems/mips/iregdef.h>
51#include <rtems/mips/idtcpu.h>
52
53#define ASSEMBLY_ONLY
54#include <rtems/score/cpu.h>
55
56               
57/* enable debugging shadow writes to misc ram; this is a vestigial
58* Mongoose-ism debug tool, but it may be handy in the future so we
59* left it in...
60*/
61
62/* #define INSTRUMENT_ISR_VECTORING */
63/* #define INSTRUMENT_EXECUTING_THREAD */
64
65
66       
67/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
68 *  and MIPS ISA Level 1 (R3xxx).
69 */
70
71#if __mips == 3
72/* 64 bit register operations */
73#define NOP     
74#define ADD     dadd
75#define STREG   sd
76#define LDREG   ld
77#define MFCO    dmfc0
78#define MTCO    dmtc0
79#define ADDU    addu
80#define ADDIU   addiu
81#define R_SZ    8
82#define F_SZ    8
83#define SZ_INT  8
84#define SZ_INT_POW2 3
85
86/* XXX if we don't always want 64 bit register ops, then another ifdef */
87
88#elif __mips == 1
89/* 32 bit register operations*/
90#define NOP     nop
91#define ADD     add
92#define STREG   sw
93#define LDREG   lw
94#define MFCO    mfc0
95#define MTCO    mtc0
96#define ADDU    add
97#define ADDIU   addi
98#define R_SZ    4
99#define F_SZ    4
100#define SZ_INT  4
101#define SZ_INT_POW2 2
102#else
103#error "mips assembly: what size registers do I deal with?"
104#endif
105
106
107#define ISR_VEC_SIZE    4
108#define EXCP_STACK_SIZE (NREGS*R_SZ)
109
110       
111#ifdef __GNUC__
112#define ASM_EXTERN(x,size) .extern x,size
113#else
114#define ASM_EXTERN(x,size)
115#endif
116
117/* NOTE: these constants must match the Context_Control structure in cpu.h */
118#define S0_OFFSET 0
119#define S1_OFFSET 1
120#define S2_OFFSET 2
121#define S3_OFFSET 3
122#define S4_OFFSET 4
123#define S5_OFFSET 5
124#define S6_OFFSET 6
125#define S7_OFFSET 7
126#define SP_OFFSET 8
127#define FP_OFFSET 9
128#define RA_OFFSET 10
129#define C0_SR_OFFSET 11
130#define C0_EPC_OFFSET 12
131
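/* Illustrative sketch only -- the authoritative layout is the
 * Context_Control structure in cpu.h.  The offsets above assume one
 * R_SZ-byte slot per entry, in roughly this order (the member type
 * name used here is an assumption, not the real one from cpu.h):
 *
 *     typedef struct {
 *         reg_t s0, s1, s2, s3, s4, s5, s6, s7;
 *         reg_t sp, fp, ra;
 *         reg_t c0_sr;
 *         reg_t c0_epc;
 *     } Context_Control;
 */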
132/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
133#define FP0_OFFSET  0
134#define FP1_OFFSET  1
135#define FP2_OFFSET  2
136#define FP3_OFFSET  3
137#define FP4_OFFSET  4
138#define FP5_OFFSET  5
139#define FP6_OFFSET  6
140#define FP7_OFFSET  7
141#define FP8_OFFSET  8
142#define FP9_OFFSET  9
143#define FP10_OFFSET 10
144#define FP11_OFFSET 11
145#define FP12_OFFSET 12
146#define FP13_OFFSET 13
147#define FP14_OFFSET 14
148#define FP15_OFFSET 15
149#define FP16_OFFSET 16
150#define FP17_OFFSET 17
151#define FP18_OFFSET 18
152#define FP19_OFFSET 19
153#define FP20_OFFSET 20
154#define FP21_OFFSET 21
155#define FP22_OFFSET 22
156#define FP23_OFFSET 23
157#define FP24_OFFSET 24
158#define FP25_OFFSET 25
159#define FP26_OFFSET 26
160#define FP27_OFFSET 27
161#define FP28_OFFSET 28
162#define FP29_OFFSET 29
163#define FP30_OFFSET 30
164#define FP31_OFFSET 31
165#define FPCS_OFFSET 32
166
167       
168ASM_EXTERN(__exceptionStackFrame, SZ_INT)
169
170       
171               
172/*
173 *  _CPU_Context_save_fp_context
174 *
175 *  This routine is responsible for saving the FP context
176 *  at *fp_context_ptr.  If the pointer to load the FP context
177 *  from is changed then the pointer is modified by this routine.
178 *
179 *  Sometimes a macro implementation of this is in cpu.h which dereferences
180 *  the ** and a similarly named routine in this file is passed something
181 *  like a (Context_Control_fp *).  The general rule on making this decision
182 *  is to avoid writing assembly language.
183 */
184
185/* void _CPU_Context_save_fp(
186 *   void **fp_context_ptr
187 * );
188 */
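/* Illustrative sketch only (the names below are hypothetical, not from
 * this port): the cpu.h macro approach mentioned above would dereference
 * the (void **) itself and hand a plain pointer to an assembly helper,
 * roughly
 *
 *     #define _CPU_Context_save_fp( _fp_context_ptr ) \
 *         _CPU_Context_save_fp_helper( \
 *             (Context_Control_fp *) *(_fp_context_ptr) )
 *
 * This port does the dereference in assembly instead: the routine below
 * receives the (void **) in a0 and loads the actual context address
 * into a1.
 */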
189
190#if ( CPU_HARDWARE_FP == TRUE )
191FRAME(_CPU_Context_save_fp,sp,0,ra)
192        .set noreorder
193        .set noat
194
195        /*
196        ** Make sure the FPU is on before we save state.  This code
197        ** is here because the FPU context switch might occur when an
198        ** integer task is switching out with an FP task switching in.
199        */
200        MFC0    t0,C0_SR
201        li      t2,SR_CU1       
202        move    t1,t0
203        or      t0,t2           /* turn on the fpu */
204#if __mips == 3
205        li      t2,SR_EXL | SR_IE
206#elif __mips == 1
207        li      t2,SR_IEC
208#endif
209        not     t2
210        and     t0,t2           /* turn off interrupts */       
211        MTC0    t0,C0_SR       
212               
213        ld      a1,(a0)
214        move    t0,ra
215        jal     _CPU_Context_save_fp_from_exception
216        NOP
217       
218        /*
219        ** Reassert the task's state because we've not saved it yet.
220        */
221        MTC0    t1,C0_SR       
222        j       t0     
223        NOP
224       
225        .globl _CPU_Context_save_fp_from_exception
226_CPU_Context_save_fp_from_exception:
227        swc1 $f0,FP0_OFFSET*F_SZ(a1)
228        swc1 $f1,FP1_OFFSET*F_SZ(a1)
229        swc1 $f2,FP2_OFFSET*F_SZ(a1)
230        swc1 $f3,FP3_OFFSET*F_SZ(a1)
231        swc1 $f4,FP4_OFFSET*F_SZ(a1)
232        swc1 $f5,FP5_OFFSET*F_SZ(a1)
233        swc1 $f6,FP6_OFFSET*F_SZ(a1)
234        swc1 $f7,FP7_OFFSET*F_SZ(a1)
235        swc1 $f8,FP8_OFFSET*F_SZ(a1)
236        swc1 $f9,FP9_OFFSET*F_SZ(a1)
237        swc1 $f10,FP10_OFFSET*F_SZ(a1)
238        swc1 $f11,FP11_OFFSET*F_SZ(a1)
239        swc1 $f12,FP12_OFFSET*F_SZ(a1)
240        swc1 $f13,FP13_OFFSET*F_SZ(a1)
241        swc1 $f14,FP14_OFFSET*F_SZ(a1)
242        swc1 $f15,FP15_OFFSET*F_SZ(a1)
243        swc1 $f16,FP16_OFFSET*F_SZ(a1)
244        swc1 $f17,FP17_OFFSET*F_SZ(a1)
245        swc1 $f18,FP18_OFFSET*F_SZ(a1)
246        swc1 $f19,FP19_OFFSET*F_SZ(a1)
247        swc1 $f20,FP20_OFFSET*F_SZ(a1)
248        swc1 $f21,FP21_OFFSET*F_SZ(a1)
249        swc1 $f22,FP22_OFFSET*F_SZ(a1)
250        swc1 $f23,FP23_OFFSET*F_SZ(a1)
251        swc1 $f24,FP24_OFFSET*F_SZ(a1)
252        swc1 $f25,FP25_OFFSET*F_SZ(a1)
253        swc1 $f26,FP26_OFFSET*F_SZ(a1)
254        swc1 $f27,FP27_OFFSET*F_SZ(a1)
255        swc1 $f28,FP28_OFFSET*F_SZ(a1)
256        swc1 $f29,FP29_OFFSET*F_SZ(a1)
257        swc1 $f30,FP30_OFFSET*F_SZ(a1)
258        swc1 $f31,FP31_OFFSET*F_SZ(a1)
259        cfc1 a0,$31                    /* Read FP status/control reg */
260        cfc1 a0,$31                    /* Two reads clear pipeline */
261        NOP
262        NOP
263        sw a0, FPCS_OFFSET*F_SZ(a1)    /* Store value to FPCS location */
264        NOP
265        j ra
266        NOP
267        .set at
268ENDFRAME(_CPU_Context_save_fp)
269#endif
270
271/*
272 *  _CPU_Context_restore_fp_context
273 *
274 *  This routine is responsible for restoring the FP context
275 *  at *fp_context_ptr.  If the point to load the FP context
276 *  from is changed then the pointer is modified by this routine.
277 *
278 *  Sometimes a macro implementation of this is in cpu.h which dereferences
279 *  the ** and a similarly named routine in this file is passed something
280 *  like a (Context_Control_fp *).  The general rule on making this decision
281 *  is to avoid writing assembly language.
282 */
283
284/* void _CPU_Context_restore_fp(
285 *   void **fp_context_ptr
286 * )
287 */
288
289#if ( CPU_HARDWARE_FP == TRUE )
290FRAME(_CPU_Context_restore_fp,sp,0,ra)
291        .set noat
292        .set noreorder
293       
294        /*
295        ** Make sure the FPU is on before we retrieve state.  This code
296        ** is here because the FPU context switch might occur when an
297        ** integer task is switching out with an FP task switching in.
298        */
299        MFC0    t0,C0_SR
300        li      t2,SR_CU1       
301        move    t1,t0
302        or      t0,t2           /* turn on the fpu */
303#if __mips == 3
304        li      t2,SR_EXL | SR_IE
305#elif __mips == 1
306        li      t2,SR_IEC
307#endif
308        not     t2
309        and     t0,t2           /* turn off interrupts */       
310        MTC0    t0,C0_SR       
311
312        ld      a1,(a0)
313        move    t0,ra
314        jal     _CPU_Context_restore_fp_from_exception
315        NOP
316
317        /*
318        ** Reassert the old task's state because we've not restored the
319        ** new one yet.
320        */
321        MTC0    t1,C0_SR       
322        j       t0
323        NOP
324       
325        .globl _CPU_Context_restore_fp_from_exception
326_CPU_Context_restore_fp_from_exception:
327        lwc1 $f0,FP0_OFFSET*4(a1)
328        lwc1 $f1,FP1_OFFSET*4(a1)
329        lwc1 $f2,FP2_OFFSET*4(a1)
330        lwc1 $f3,FP3_OFFSET*4(a1)
331        lwc1 $f4,FP4_OFFSET*4(a1)
332        lwc1 $f5,FP5_OFFSET*4(a1)
333        lwc1 $f6,FP6_OFFSET*4(a1)
334        lwc1 $f7,FP7_OFFSET*4(a1)
335        lwc1 $f8,FP8_OFFSET*4(a1)
336        lwc1 $f9,FP9_OFFSET*4(a1)
337        lwc1 $f10,FP10_OFFSET*4(a1)
338        lwc1 $f11,FP11_OFFSET*4(a1)
339        lwc1 $f12,FP12_OFFSET*4(a1)
340        lwc1 $f13,FP13_OFFSET*4(a1)
341        lwc1 $f14,FP14_OFFSET*4(a1)
342        lwc1 $f15,FP15_OFFSET*4(a1)
343        lwc1 $f16,FP16_OFFSET*4(a1)
344        lwc1 $f17,FP17_OFFSET*4(a1)
345        lwc1 $f18,FP18_OFFSET*4(a1)
346        lwc1 $f19,FP19_OFFSET*4(a1)
347        lwc1 $f20,FP20_OFFSET*4(a1)
348        lwc1 $f21,FP21_OFFSET*4(a1)
349        lwc1 $f22,FP22_OFFSET*4(a1)
350        lwc1 $f23,FP23_OFFSET*4(a1)
351        lwc1 $f24,FP24_OFFSET*4(a1)
352        lwc1 $f25,FP25_OFFSET*4(a1)
353        lwc1 $f26,FP26_OFFSET*4(a1)
354        lwc1 $f27,FP27_OFFSET*4(a1)
355        lwc1 $f28,FP28_OFFSET*4(a1)
356        lwc1 $f29,FP29_OFFSET*4(a1)
357        lwc1 $f30,FP30_OFFSET*4(a1)
358        lwc1 $f31,FP31_OFFSET*4(a1)
359        cfc1 a0,$31                  /* Read from FP status/control reg */
360        cfc1 a0,$31                  /* Two reads clear pipeline */
361        NOP                          /* NOPs ensure execution */
362        NOP
363        lw a0,FPCS_OFFSET*4(a1)      /* Load saved FPCS value */
364        NOP
365        ctc1 a0,$31                  /* Restore FPCS register */
366        NOP
367        j ra
368        NOP
369        .set at
370ENDFRAME(_CPU_Context_restore_fp)
371#endif
372
373/*  _CPU_Context_switch
374 *
375 *  This routine performs a normal non-FP context switch.
376 */
377
378/* void _CPU_Context_switch(
379 *   Context_Control  *run,
380 *   Context_Control  *heir
381 * )
382 */
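/* Illustrative usage sketch (hedged -- the real call site is in the
 * score's dispatch code, not in this file): the dispatcher passes the
 * executing and heir tasks' register areas, roughly
 *
 *     _CPU_Context_switch( &executing->Registers, &heir->Registers );
 *
 * so a0 ("run") is the context to save into and a1 ("heir") is the
 * context to reload below.
 */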
383
384FRAME(_CPU_Context_switch,sp,0,ra)
385        .set noreorder
386
387        MFC0    t0,C0_SR
388#if __mips == 3
389        li      t1,SR_EXL | SR_IE
390#elif __mips == 1
391        li      t1,SR_IEC
392#endif
393        STREG   t0,C0_SR_OFFSET*R_SZ(a0)        /* save the task's SR */
394        not     t1
395        and     t0,t1                           /* mask off interrupts while we context switch */
396        MTC0    t0,C0_SR
397        NOP
398
399        STREG ra,RA_OFFSET*R_SZ(a0)             /* save current context */
400        STREG sp,SP_OFFSET*R_SZ(a0)
401        STREG fp,FP_OFFSET*R_SZ(a0)
402        STREG s0,S0_OFFSET*R_SZ(a0)
403        STREG s1,S1_OFFSET*R_SZ(a0)
404        STREG s2,S2_OFFSET*R_SZ(a0)
405        STREG s3,S3_OFFSET*R_SZ(a0)
406        STREG s4,S4_OFFSET*R_SZ(a0)
407        STREG s5,S5_OFFSET*R_SZ(a0)
408        STREG s6,S6_OFFSET*R_SZ(a0)
409        STREG s7,S7_OFFSET*R_SZ(a0)
410
411       
412        /*
413        ** this code grabs the userspace EPC if we're dispatching from
414        ** an interrupt frame or supplies the address of the dispatch
415        ** routine if not.  This is entirely for the gdb stub's benefit so
416        ** it can know where each task is running.
417        **
418        ** Its value is only set when calling _Thread_Dispatch from
419        ** the interrupt handler and is cleared immediately when this
420        ** routine gets it.
421        */
422       
423        la      t0,__exceptionStackFrame        /* see if we're coming in from an exception */
424        LDREG   t1, (t0)
425        NOP
426        beqz    t1,1f
427
428        STREG   zero, (t0)                      /* and clear it */
429        NOP
430        LDREG   t0,R_EPC*R_SZ(t1)               /* get the userspace EPC from the frame */
431        b       2f
432               
4331:      la    t0,_Thread_Dispatch               /* if ==0, we're switched out */
434
4352:      STREG   t0,C0_EPC_OFFSET*R_SZ(a0)
436       
437
438_CPU_Context_switch_restore:
439        LDREG ra,RA_OFFSET*R_SZ(a1)             /* restore context */
440        LDREG sp,SP_OFFSET*R_SZ(a1)
441        LDREG fp,FP_OFFSET*R_SZ(a1)
442        LDREG s0,S0_OFFSET*R_SZ(a1)
443        LDREG s1,S1_OFFSET*R_SZ(a1)
444        LDREG s2,S2_OFFSET*R_SZ(a1)
445        LDREG s3,S3_OFFSET*R_SZ(a1)
446        LDREG s4,S4_OFFSET*R_SZ(a1)
447        LDREG s5,S5_OFFSET*R_SZ(a1)
448        LDREG s6,S6_OFFSET*R_SZ(a1)
449        LDREG s7,S7_OFFSET*R_SZ(a1)
450
451        LDREG t0, C0_SR_OFFSET*R_SZ(a1)
452       
453//      NOP
454//#if __mips == 3
455//        andi  t0,SR_EXL
456//        bnez  t0,_CPU_Context_1   /* set exception level from restore context */
457//        li    t0,~SR_EXL
458//        MFC0  t1,C0_SR
459//        NOP
460//        and   t1,t0
461//        MTC0  t1,C0_SR
462//
463//#elif __mips == 1
464//
465//        andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
466//        beq   t0,$0,_CPU_Context_1          /* set level from restore context */
467//        MFC0  t0,C0_SR
468//        NOP
469//        or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
470//        MTC0  t0,C0_SR                      /* set with enabled */
471//        NOP
472
473
474/*
475** Incorporate the incoming task's FP coprocessor state and interrupt mask/enable
476** into the status register.  We jump thru the requisite hoops to ensure we
477** maintain all other SR bits as global values.
478**
479** Get the task's FPU enable, int mask & int enable bits.  Although we keep the
480** software int enables on a per-task basis, the rtems_task_create
481** Interrupt Level & int level manipulation functions cannot enable/disable them,
482** so they are automatically enabled for all tasks.  To turn them off, a task 
483** must itself manipulate the SR register. 
484**
485** Although something of a hack on this processor, we treat the SR register
486** int enables as the RTEMS interrupt level.  We use the int level
487** value as a bitmask, not as any sort of greater than/less than metric.
488** Manipulation of a task's interrupt level directly corresponds to manipulation
489** of that task's SR bits, as seen in cpu.c
490**
491** Note, interrupts are disabled before context is saved, though the task's
492** interrupt enable state is recorded.  The task swapping in will apply its
493** specific SR bits, including interrupt enable.  If further task-specific
494** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
495** cpu.h task initialization code that will be affected. 
496*/
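/*
** In C-like terms, the merge below amounts to (illustrative sketch only;
** the names here are not real symbols from this port):
**
**     per_task = SR_CU1 | SR_IMASK | int_enable_bits;   (ISA dependent)
**     new_sr   = (current_sr & ~per_task) | (task_sr & per_task);
**
** where task_sr is the C0_SR value reloaded from the heir's context
** above and current_sr is the live C0_SR; everything outside per_task
** is kept as a global value.
*/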
497
498        li      t2,SR_CU1
499        or      t2,SR_IMASK
500
501        /* int enable bits */
502#if __mips == 3
503        or      t2,SR_EXL + SR_IE
504#elif __mips == 1
505        /*
506        ** Save current, previous & old int enables.  This is key because
507        ** we can dispatch from within the stack frame used by an
508        ** interrupt service.  The int enables nest, but not beyond
509        ** previous and old because of the dispatch interlock seen
510        ** in the interrupt processing code
511        */
512        or      t2,SR_IEC + SR_IEP + SR_IEO
513#endif
514        and     t0,t2           /* keep only the per-task bits */
515               
516        MFC0    t1,C0_SR        /* grab the current SR */
517        not     t2             
518        and     t1,t2           /* mask off the old task's bits */
519        or      t1,t0           /* or in the new task's bits */
520        MTC0    t1,C0_SR        /* and load the new SR */
521        NOP
522       
523/* _CPU_Context_1: */
524        j       ra
525        NOP
526ENDFRAME(_CPU_Context_switch)
527
528       
529/*
530 *  _CPU_Context_restore
531 *
532 *  This routine is generally used only to restart self in an
533 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
534 *
535 *  NOTE: May be unnecessary to reload some registers.
536 *
537 *  void _CPU_Context_restore(
538 *    Context_Control *new_context
539 *  );
540 */
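/*
 *  Illustrative call sketch (not code from this port): a thread
 *  restarting itself would reload its own saved integer context, e.g.
 *
 *      _CPU_Context_restore( &executing->Registers );
 *
 *  which simply falls into _CPU_Context_switch_restore below with a1
 *  pointing at that context and never returns to the caller.
 */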
541
542FRAME(_CPU_Context_restore,sp,0,ra)
543        .set noreorder
544        move    a1,a0
545        j       _CPU_Context_switch_restore
546        NOP
547
548ENDFRAME(_CPU_Context_restore)
549
550       
551ASM_EXTERN(_ISR_Nest_level, SZ_INT)
552ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
553ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
554ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
555ASM_EXTERN(_Thread_Executing,SZ_INT)
556       
557.extern _Thread_Dispatch
558.extern _ISR_Vector_table
559
560
561       
562
563
564/*  void _DBG_Handler()
565 *
566 *  This routine services the (at least) MIPS1 debug vector,
567 *  used only by the hardware debugging features.  This code,
568 *  while optional, is best located here because it is intrinsically
569 *  associated with exceptions in general & thus tied pretty
570 *  closely to _ISR_Handler.
571 *
572 */
573
574
575FRAME(_DBG_Handler,sp,0,ra)
576        .set noreorder
577        la      k0,_ISR_Handler
578        j       k0
579        NOP
580        .set reorder
581ENDFRAME(_DBG_Handler)
582
583
584
585
586       
587/*  void _ISR_Handler()
588 *
589 *  This routine provides the RTEMS interrupt management.
590 *
591 *  void _ISR_Handler()
592 *
593 *
594 *  This discussion ignores a lot of the ugly details in a real
595 *  implementation such as saving enough registers/state to be
596 *  able to do something real.  Keep in mind that the goal is
597 *  to invoke a user's ISR handler which is written in C and
598 *  uses a certain set of registers.
599 *
600 *  Also note that the exact order is to a large extent flexible.
601 *  Hardware will dictate a sequence for a certain subset of
602 *  _ISR_Handler while requirements for setting
603 *
604 *  At entry to "common" _ISR_Handler, the vector number must be
605 *  available.  On some CPUs the hardware puts either the vector
606 *  number or the offset into the vector table for this ISR in a
607 *  known place.  If the hardware does not give us this information,
608 *  then the assembly portion of RTEMS for this port will contain
609 *  a set of distinct interrupt entry points which somehow place
610 *  the vector number in a known place (which is safe if another
611 *  interrupt nests this one) and branches to _ISR_Handler.
612 *
613 */
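/*  In outline, the handler below does the following (a summary of the
 *  code that follows, not additional steps):
 *
 *    save the volatile registers, SR and EPC on the stack
 *    if the CAUSE exception code is non-zero (a true exception):
 *        save the rest of the context, call mips_vector_exceptions(),
 *        then restore and leave via _ISR_Handler_exit
 *    else (an interrupt):
 *        _ISR_Nest_level++ and _Thread_Dispatch_disable_level++
 *        mips_vector_isr_handlers( frame )
 *        _ISR_Nest_level-- and _Thread_Dispatch_disable_level--
 *        if both are zero and a context switch or signal is pending,
 *            re-enable interrupts and call _Thread_Dispatch()
 *    restore the saved registers and return through EPC (j k1 / rfe)
 */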
614
615FRAME(_ISR_Handler,sp,0,ra)
616        .set noreorder
617
618        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */
619
620        /* wastes a lot of stack space for context?? */
621        ADDIU    sp,sp,-EXCP_STACK_SIZE
622
623        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
624        STREG v0, R_V0*R_SZ(sp)
625        STREG v1, R_V1*R_SZ(sp)
626        STREG a0, R_A0*R_SZ(sp)
627        STREG a1, R_A1*R_SZ(sp)
628        STREG a2, R_A2*R_SZ(sp)
629        STREG a3, R_A3*R_SZ(sp)
630        STREG t0, R_T0*R_SZ(sp)
631        STREG t1, R_T1*R_SZ(sp)
632        STREG t2, R_T2*R_SZ(sp)
633        STREG t3, R_T3*R_SZ(sp)
634        STREG t4, R_T4*R_SZ(sp)
635        STREG t5, R_T5*R_SZ(sp)
636        STREG t6, R_T6*R_SZ(sp)
637        STREG t7, R_T7*R_SZ(sp)
638        mflo  t0
639        STREG t8, R_T8*R_SZ(sp)
640        STREG t0, R_MDLO*R_SZ(sp)
641        STREG t9, R_T9*R_SZ(sp)
642        mfhi  t0
643        STREG gp, R_GP*R_SZ(sp)
644        STREG t0, R_MDHI*R_SZ(sp)
645        STREG fp, R_FP*R_SZ(sp)
646       
647        .set noat
648        STREG AT, R_AT*R_SZ(sp)
649        .set at
650
651        MFC0     t0,C0_SR
652        MFC0     t1,C0_EPC
653        STREG    t0,R_SR*R_SZ(sp)
654        STREG    t1,R_EPC*R_SZ(sp)
655       
656
657#ifdef INSTRUMENT_EXECUTING_THREAD
658        lw t2, _Thread_Executing
659        NOP
660        sw t2, 0x8001FFF0
661#endif
662       
663        /* determine if an interrupt generated this exception */
664
665        MFC0     t0,C0_CAUSE
666        NOP
667
668        and      t1,t0,CAUSE_EXCMASK
669        beq      t1, 0, _ISR_Handler_1
670
671_ISR_Handler_Exception:
672
673        /*  If we return from the exception, it is assumed nothing
674         *  bad is going on and we can continue to run normally.
675         *  But we want to save the entire CPU context so exception
676         *  handlers can look at it and change it.
677         *
678         *  NOTE: This is the path the debugger stub will take.
679         */
680
681        /* already got t0 = cause in the interrupt test above */
682        STREG    t0,R_CAUSE*R_SZ(sp)
683
684        STREG    sp, R_SP*R_SZ(sp)
685       
686        STREG    s0,R_S0*R_SZ(sp)     /* save s0 - s7 */
687        STREG    s1,R_S1*R_SZ(sp)
688        STREG    s2,R_S2*R_SZ(sp)
689        STREG    s3,R_S3*R_SZ(sp)
690        STREG    s4,R_S4*R_SZ(sp)
691        STREG    s5,R_S5*R_SZ(sp)
692        STREG    s6,R_S6*R_SZ(sp)
693        STREG    s7,R_S7*R_SZ(sp)
694
695        /* CP0 special registers */
696
697#if __mips == 1
698        MFC0     t0,C0_TAR
699#endif
700        MFC0     t1,C0_BADVADDR
701       
702#if __mips == 1
703        STREG    t0,R_TAR*R_SZ(sp)
704#else
705        NOP
706#endif
707        STREG    t1,R_BADVADDR*R_SZ(sp)
708       
709#if ( CPU_HARDWARE_FP == TRUE )
710        MFC0     t0,C0_SR                 /* FPU is enabled, save state */
711        NOP
712        srl      t0,t0,16
713        andi     t0,t0,(SR_CU1 >> 16)
714        beqz     t0, 1f
715        NOP
716       
717        la       a1,R_F0*R_SZ(sp)
718        jal      _CPU_Context_save_fp_from_exception
719        NOP
720        MFC1     t0,C1_REVISION
721        MFC1     t1,C1_STATUS
722        STREG    t0,R_FEIR*R_SZ(sp)
723        STREG    t1,R_FCSR*R_SZ(sp)
724       
7251:     
726#endif
727       
728        move     a0,sp
729        jal      mips_vector_exceptions
730        NOP
731
732       
733        /*
734        ** note, if the exception vector returns, rely on it to have
735        ** adjusted EPC so we will return to some correct address.  If
736        ** this is not done, we might get stuck in an infinite loop because
737        ** we'll return to the instruction where the exception occurred and
738        ** it could throw again.
739        **
740        ** It is expected the only code using the exception processing is
741        ** either the gdb stub or some user code which is either going to
742        ** panic or do something useful.  Regardless, it is up to each
743        ** exception routine to properly adjust EPC, so the code below
744        ** may be helpful for doing just that.
745        */
746       
747/* *********************************************************************
748** this code follows the R3000's exception return logic, but is not
749** needed because the gdb stub does it for us.  It might be useful
750** for something else at some point...
751**
752        * compute the address of the instruction we'll return to *
753
754        LDREG   t1, R_CAUSE*R_SZ(sp)
755        LDREG   t0, R_EPC*R_SZ(sp)
756
757        * first see if the exception happened in the delay slot *
758        li      t3,CAUSE_BD
759        AND     t4,t1,t3
760        beqz    t4,excnodelay
761        NOP
762       
763        * it did, now see if the branch occurred or not *
764        li      t3,CAUSE_BT
765        AND     t4,t1,t3
766        beqz    t4,excnobranch
767        NOP
768       
769        * branch was taken, we resume at the branch target *
770        LDREG   t0, R_TAR*R_SZ(sp)
771        j       excreturn
772        NOP
773
774excnobranch:
775        ADDU    t0,R_SZ
776
777excnodelay:     
778        ADDU    t0,R_SZ
779               
780excreturn:     
781        STREG   t0, R_EPC*R_SZ(sp)
782        NOP
783********************************************************************* */
784       
785
786 /* if we're returning into mips_break, move to the next instruction */
787       
788        LDREG   t0,R_EPC*R_SZ(sp)
789        la      t1,mips_break
790        xor     t2,t0,t1
791        bnez    t2,3f
792       
793        addu    t0,R_SZ
794        STREG   t0,R_EPC*R_SZ(sp)
795        NOP
7963:     
797
798       
799       
800               
801#if ( CPU_HARDWARE_FP == TRUE )
802        MFC0     t0,C0_SR               /* FPU is enabled, restore state */
803        NOP
804        srl      t0,t0,16
805        andi     t0,t0,(SR_CU1 >> 16)
806        beqz     t0, 2f
807        NOP
808       
809        la       a1,R_F0*R_SZ(sp)
810        jal      _CPU_Context_restore_fp_from_exception
811        NOP
812        LDREG    t0,R_FEIR*R_SZ(sp)
813        LDREG    t1,R_FCSR*R_SZ(sp)
814        MTC1     t0,C1_REVISION
815        MTC1     t1,C1_STATUS
8162:
817#endif
818        LDREG    s0,R_S0*R_SZ(sp)    /* restore s0 - s7 */
819        LDREG    s1,R_S1*R_SZ(sp)
820        LDREG    s2,R_S2*R_SZ(sp)
821        LDREG    s3,R_S3*R_SZ(sp)
822        LDREG    s4,R_S4*R_SZ(sp)
823        LDREG    s5,R_S5*R_SZ(sp)
824        LDREG    s6,R_S6*R_SZ(sp)
825        LDREG    s7,R_S7*R_SZ(sp)
826
827        /* do NOT restore the sp as this could mess up the world */
828        /* do NOT restore the cause as this could mess up the world */
829
830        /*
831        ** Jump all the way out.  If there's a pending interrupt, just
832        ** let it be serviced later.  Since we're probably using the
833        ** gdb stub, we've already disrupted the ISR service timing
834        ** anyhow.  We oughtn't mix exception and interrupt processing
835        ** in the same exception call in case the exception stuff
836        ** might interfere with the dispatching & timer ticks.
837        */
838        j        _ISR_Handler_exit
839        NOP
840
841_ISR_Handler_1:
842
843        MFC0     t1,C0_SR
844        and      t0,CAUSE_IPMASK
845        and      t0,t1
846
847        /* external interrupt not enabled, ignore */
848        /* but if it's not an exception or an interrupt, */
849        /* Then where did it come from??? */
850       
851        beq      t0,zero,_ISR_Handler_exit
852
853       
854       
855               
856  /*
857   *  save some or all context on stack
858   *  may need to save some special interrupt information for exit
859   *
860   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
861   *    if ( _ISR_Nest_level == 0 )
862   *      switch to software interrupt stack
863   *  #endif
864   */
865
866  /*
867   *  _ISR_Nest_level++;
868   */
869        LDREG  t0,_ISR_Nest_level
870        NOP
871        ADD    t0,t0,1
872        STREG  t0,_ISR_Nest_level
873  /*
874   *  _Thread_Dispatch_disable_level++;
875   */
876        LDREG  t1,_Thread_Dispatch_disable_level
877        NOP
878        ADD    t1,t1,1
879        STREG  t1,_Thread_Dispatch_disable_level
880
881  /*
882   *  Call the CPU model or BSP specific routine to decode the
883   *  interrupt source and actually vector to device ISR handlers.
884   */
885       
886#ifdef INSTRUMENT_ISR_VECTORING
887        NOP
888        li      t1, 1
889        sw      t1, 0x8001e000
890#endif
891
892        move     a0,sp
893        jal      mips_vector_isr_handlers
894        NOP
895       
896#ifdef INSTRUMENT_ISR_VECTORING
897        li      t1, 0
898        sw      t1, 0x8001e000
899        NOP
900#endif
901               
902  /*
903   *  --_ISR_Nest_level;
904   */
905        LDREG  t2,_ISR_Nest_level
906        NOP
907        ADD    t2,t2,-1
908        STREG  t2,_ISR_Nest_level
909  /*
910   *  --_Thread_Dispatch_disable_level;
911   */
912        LDREG  t1,_Thread_Dispatch_disable_level
913        NOP
914        ADD    t1,t1,-1
915        STREG  t1,_Thread_Dispatch_disable_level
916  /*
917   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
918   *    goto the label "exit interrupt (simple case)"
919   */
920        or  t0,t2,t1
921        bne t0,zero,_ISR_Handler_exit
922        NOP
923
924
925
926       
927  /*
928   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
929   *    restore stack
930   *  #endif
931   * 
932   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
933   *    goto the label "exit interrupt (simple case)"
934   */
935        LDREG t0,_Context_Switch_necessary
936        LDREG t1,_ISR_Signals_to_thread_executing
937        NOP
938        or    t0,t0,t1
939        beq   t0,zero,_ISR_Handler_exit
940        NOP
941
942       
943       
944#ifdef INSTRUMENT_EXECUTING_THREAD
945        lw      t0,_Thread_Executing
946        NOP
947        sw      t0,0x8001FFF4
948#endif
949
950/*
951** Turn on interrupts before entering Thread_Dispatch which
952** will run for a while, thus allowing new interrupts to
953** be serviced.  Observe the Thread_Dispatch_disable_level interlock
954** that prevents recursive entry into Thread_Dispatch.
955*/
956
957        MFC0    t0, C0_SR
958#if __mips == 3
959        li      t1,SR_EXL | SR_IE
960#elif __mips == 1
961        li      t1,SR_IEC
962#endif
963        or      t0, t1
964        MTC0    t0, C0_SR
965        NOP
966
967        /* save off our stack frame so the context switcher can get to it */
968        la      t0,__exceptionStackFrame
969        STREG   sp,(t0)
970                                       
971        jal     _Thread_Dispatch
972        NOP
973
974        /* and make sure it's clear in case we didn't dispatch.  If we did, it's
975        ** already cleared */
976        la      t0,__exceptionStackFrame
977        STREG   zero,(t0)
978        NOP
979
980/*
981** turn interrupts back off while we restore context so
982** a badly timed interrupt won't accidentally mess things up
983*/
984        MFC0    t0, C0_SR
985#if __mips == 3
986        li      t1,SR_EXL | SR_IE
987#elif __mips == 1
988        /* ints off, current & prev kernel mode on (kernel mode enabled is bit clear..argh!) */
989        li      t1,SR_IEC | SR_KUP | SR_KUC     
990#endif
991        not     t1
992        and     t0, t1
993
994#if __mips == 1
995        /* disabled 7/29, gregm, this task's context was saved previously in an interrupt,
996        ** so we'll just restore the task's previous interrupt enables.
997
998        **
999        ** make sure previous int enable is on  because we're returning from an interrupt
1000        ** which means interrupts have to be enabled
1001       
1002        li      t1,SR_IEP
1003        or      t0,t1
1004        */
1005#endif
1006        MTC0    t0, C0_SR
1007        NOP
1008       
1009#ifdef INSTRUMENT_EXECUTING_THREAD
1010        lw      t0,_Thread_Executing
1011        NOP
1012        sw      t0,0x8001FFF8
1013#endif
1014
1015       
1016  /*
1017   *  prepare to get out of interrupt
1018   *  return from interrupt  (maybe to _ISR_Dispatch)
1019   *
1020   *  LABEL "exit interrupt (simple case):"
1021   *  prepare to get out of interrupt
1022   *  return from interrupt
1023   */
1024
1025_ISR_Handler_exit:
1026/*
1027** Skip the SR restore because it's a global register. _CPU_Context_switch_restore
1028** adjusts it according to each task's configuration.  If we didn't dispatch, the
1029** SR value isn't changed, so all we need to do is return.
1030**
1031*/
1032        /* restore context from stack */
1033       
1034#ifdef INSTRUMENT_EXECUTING_THREAD
1035        lw      t0,_Thread_Executing
1036        NOP
1037        sw      t0, 0x8001FFFC
1038#endif
1039
1040        LDREG t8, R_MDLO*R_SZ(sp)
1041        LDREG t0, R_T0*R_SZ(sp)
1042        mtlo  t8
1043        LDREG t8, R_MDHI*R_SZ(sp)           
1044        LDREG t1, R_T1*R_SZ(sp)
1045        mthi  t8
1046        LDREG t2, R_T2*R_SZ(sp)
1047        LDREG t3, R_T3*R_SZ(sp)
1048        LDREG t4, R_T4*R_SZ(sp)
1049        LDREG t5, R_T5*R_SZ(sp)
1050        LDREG t6, R_T6*R_SZ(sp)
1051        LDREG t7, R_T7*R_SZ(sp)
1052        LDREG t8, R_T8*R_SZ(sp)
1053        LDREG t9, R_T9*R_SZ(sp)
1054        LDREG gp, R_GP*R_SZ(sp)
1055        LDREG fp, R_FP*R_SZ(sp)
1056        LDREG ra, R_RA*R_SZ(sp)
1057        LDREG a0, R_A0*R_SZ(sp)
1058        LDREG a1, R_A1*R_SZ(sp)
1059        LDREG a2, R_A2*R_SZ(sp)
1060        LDREG a3, R_A3*R_SZ(sp)
1061        LDREG v1, R_V1*R_SZ(sp)
1062        LDREG v0, R_V0*R_SZ(sp)
1063       
1064        LDREG     k1, R_EPC*R_SZ(sp)
1065       
1066        .set noat
1067        LDREG     AT, R_AT*R_SZ(sp)
1068        .set at
1069
1070        ADDIU     sp,sp,EXCP_STACK_SIZE
1071        j         k1
1072        rfe
1073        NOP
1074
1075       .set    reorder
1076ENDFRAME(_ISR_Handler)
1077
1078
1079
1080       
1081FRAME(mips_break,sp,0,ra)
1082        .set noreorder
1083        break   0x0     /* this statement must be first in this function, assumed so by mips-stub.c */
1084        NOP
1085        j       ra
1086        NOP
1087       .set    reorder
1088ENDFRAME(mips_break)
1089