source: rtems/c/src/exec/score/cpu/mips/cpu_asm.S @ f64f1816

Last change on this file was f64f1816, checked in by Joel Sherrill <joel.sherrill@…> on 10/12/01 at 17:11:40

2001-10-12 Joel Sherrill <joel@…>

  • cpu_asm.S: _CPU_Context_save_fp was incorrectly inside a conditional compilation block with (CPU_HARDWARE_FP == FALSE). Reported by Wayne Bullaughey <wayne@…>.

/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and adding the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *
 *  COPYRIGHT (c) 1989-2000.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 *  $Id$
 */

#include <asm.h>
#include "iregdef.h"
#include "idtcpu.h"

/* Enable debugging shadow writes to misc RAM; this is a vestigial
 * Mongoose-ism debug tool, but it may be handy in the future so we
 * left it in...
 */
/* #define INSTRUMENT */




/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP
#define ADD     dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0
#define MTCO    dmtc0
#define ADDU    addu
#define ADDIU   addiu
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif __mips == 1
/* 32 bit register operations */
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h;
 * an illustrative layout is sketched after the offsets below. */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
/* #define C0_EPC_OFFSET 12 */

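/*
 *  For reference, a C structure whose layout matches the offsets above
 *  would look roughly like the sketch below.  The authoritative
 *  definition is Context_Control in cpu.h; the field names and the
 *  register-sized element type used here are only illustrative.
 *
 *    typedef struct {
 *      unsigned long s0, s1, s2, s3, s4, s5, s6, s7;    offsets 0..7
 *      unsigned long sp;                                offset  8
 *      unsigned long fp;                                offset  9
 *      unsigned long ra;                                offset 10
 *      unsigned long c0_sr;                             offset 11
 *    } Context_Control;
 *
 *  Each offset is scaled by R_SZ (4 or 8 bytes) where it is used in the
 *  STREG/LDREG instructions below.
 */
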
/* NOTE: these constants must match the Context_Control_fp structure in cpu.h;
 * an illustrative layout is sketched after the offsets below. */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31

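/*
 *  Similarly, the offsets above assume that Context_Control_fp in cpu.h
 *  is, in effect, an array of the 32 FPU registers.  The sketch is only
 *  illustrative; fp_register stands for any type that is F_SZ bytes wide.
 *
 *    typedef struct {
 *      fp_register fp[32];       f0..f31, indexed by the FPn_OFFSETs
 *    } Context_Control_fp;
 */
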

/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed, then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */

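/*
 *  To make the note above concrete: a port that chooses the macro
 *  approach could define something like the following in cpu.h, so the
 *  assembly routine only ever sees a single level of indirection.  The
 *  macro bodies and the *_context helper names are hypothetical; this
 *  port instead takes the (void **) argument directly in the assembly
 *  below and dereferences it itself.
 *
 *    #define _CPU_Context_save_fp( _fp_context_ptr ) \
 *            _CPU_Context_save_fp_context( *(_fp_context_ptr) )
 *
 *    #define _CPU_Context_restore_fp( _fp_context_ptr ) \
 *            _CPU_Context_restore_fp_context( *(_fp_context_ptr) )
 */
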
#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noat
        ld   a1,(a0)
        NOP
        swc1 $f0,FP0_OFFSET*F_SZ(a1)
        swc1 $f1,FP1_OFFSET*F_SZ(a1)
        swc1 $f2,FP2_OFFSET*F_SZ(a1)
        swc1 $f3,FP3_OFFSET*F_SZ(a1)
        swc1 $f4,FP4_OFFSET*F_SZ(a1)
        swc1 $f5,FP5_OFFSET*F_SZ(a1)
        swc1 $f6,FP6_OFFSET*F_SZ(a1)
        swc1 $f7,FP7_OFFSET*F_SZ(a1)
        swc1 $f8,FP8_OFFSET*F_SZ(a1)
        swc1 $f9,FP9_OFFSET*F_SZ(a1)
        swc1 $f10,FP10_OFFSET*F_SZ(a1)
        swc1 $f11,FP11_OFFSET*F_SZ(a1)
        swc1 $f12,FP12_OFFSET*F_SZ(a1)
        swc1 $f13,FP13_OFFSET*F_SZ(a1)
        swc1 $f14,FP14_OFFSET*F_SZ(a1)
        swc1 $f15,FP15_OFFSET*F_SZ(a1)
        swc1 $f16,FP16_OFFSET*F_SZ(a1)
        swc1 $f17,FP17_OFFSET*F_SZ(a1)
        swc1 $f18,FP18_OFFSET*F_SZ(a1)
        swc1 $f19,FP19_OFFSET*F_SZ(a1)
        swc1 $f20,FP20_OFFSET*F_SZ(a1)
        swc1 $f21,FP21_OFFSET*F_SZ(a1)
        swc1 $f22,FP22_OFFSET*F_SZ(a1)
        swc1 $f23,FP23_OFFSET*F_SZ(a1)
        swc1 $f24,FP24_OFFSET*F_SZ(a1)
        swc1 $f25,FP25_OFFSET*F_SZ(a1)
        swc1 $f26,FP26_OFFSET*F_SZ(a1)
        swc1 $f27,FP27_OFFSET*F_SZ(a1)
        swc1 $f28,FP28_OFFSET*F_SZ(a1)
        swc1 $f29,FP29_OFFSET*F_SZ(a1)
        swc1 $f30,FP30_OFFSET*F_SZ(a1)
        swc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed, then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        ld a1,(a0)
        NOP
        lwc1 $f0,FP0_OFFSET*F_SZ(a1)
        lwc1 $f1,FP1_OFFSET*F_SZ(a1)
        lwc1 $f2,FP2_OFFSET*F_SZ(a1)
        lwc1 $f3,FP3_OFFSET*F_SZ(a1)
        lwc1 $f4,FP4_OFFSET*F_SZ(a1)
        lwc1 $f5,FP5_OFFSET*F_SZ(a1)
        lwc1 $f6,FP6_OFFSET*F_SZ(a1)
        lwc1 $f7,FP7_OFFSET*F_SZ(a1)
        lwc1 $f8,FP8_OFFSET*F_SZ(a1)
        lwc1 $f9,FP9_OFFSET*F_SZ(a1)
        lwc1 $f10,FP10_OFFSET*F_SZ(a1)
        lwc1 $f11,FP11_OFFSET*F_SZ(a1)
        lwc1 $f12,FP12_OFFSET*F_SZ(a1)
        lwc1 $f13,FP13_OFFSET*F_SZ(a1)
        lwc1 $f14,FP14_OFFSET*F_SZ(a1)
        lwc1 $f15,FP15_OFFSET*F_SZ(a1)
        lwc1 $f16,FP16_OFFSET*F_SZ(a1)
        lwc1 $f17,FP17_OFFSET*F_SZ(a1)
        lwc1 $f18,FP18_OFFSET*F_SZ(a1)
        lwc1 $f19,FP19_OFFSET*F_SZ(a1)
        lwc1 $f20,FP20_OFFSET*F_SZ(a1)
        lwc1 $f21,FP21_OFFSET*F_SZ(a1)
        lwc1 $f22,FP22_OFFSET*F_SZ(a1)
        lwc1 $f23,FP23_OFFSET*F_SZ(a1)
        lwc1 $f24,FP24_OFFSET*F_SZ(a1)
        lwc1 $f25,FP25_OFFSET*F_SZ(a1)
        lwc1 $f26,FP26_OFFSET*F_SZ(a1)
        lwc1 $f27,FP27_OFFSET*F_SZ(a1)
        lwc1 $f28,FP28_OFFSET*F_SZ(a1)
        lwc1 $f29,FP29_OFFSET*F_SZ(a1)
        lwc1 $f30,FP30_OFFSET*F_SZ(a1)
        lwc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */

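/*
 *  For orientation, the CPU independent thread dispatcher reaches this
 *  routine with something along the lines of
 *
 *    _CPU_Context_switch( &executing->Registers, &heir->Registers );
 *
 *  so a0 addresses the context being saved ("run") and a1 the context
 *  being restored ("heir").  The exact call site lives in the portable
 *  code (_Thread_Dispatch); the TCB member name Registers is shown only
 *  as an illustration of typical usage.
 */
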
FRAME(_CPU_Context_switch,sp,0,ra)

        MFC0  t0,C0_SR
        li    t1,~(SR_INTERRUPT_ENABLE_BITS)
        STREG t0,C0_SR_OFFSET*R_SZ(a0)  /* save status register */
        and   t0,t1
        MTC0  t0,C0_SR                  /* first disable ie bit (recommended) */
#if __mips == 3
        ori   t0,SR_EXL|SR_IE           /* enable exception level to disable interrupts */
        MTC0  t0,C0_SR
#endif

        STREG ra,RA_OFFSET*R_SZ(a0)     /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)

        /*
        MFC0  t0,C0_EPC
        NOP
        STREG t0,C0_EPC_OFFSET*R_SZ(a0)
        */

_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)         /* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        /*
        LDREG t0,C0_EPC_OFFSET*R_SZ(a1)
        NOP
        MTC0  t0,C0_EPC
        */

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)
        NOP

#if __mips == 3
        andi  t0,SR_EXL
        bnez  t0,_CPU_Context_1   /* set exception level from restore context */
        li    t0,~SR_EXL
        MFC0  t1,C0_SR
        NOP
        and   t1,t0
        MTC0  t1,C0_SR

#elif __mips == 1
        andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
        beq   t0,$0,_CPU_Context_1          /* set level from restore context */
        MFC0  t0,C0_SR
        NOP
        or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
        MTC0  t0,C0_SR                      /* set with enabled */
#endif


_CPU_Context_1:
        j ra
        NOP
ENDFRAME(_CPU_Context_switch)

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

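/*
 *  Typical use is restarting the currently executing thread without
 *  saving anything first, for example
 *
 *    _CPU_Context_restore( &executing->Registers );
 *
 *  which simply falls into the restore half of _CPU_Context_switch above.
 *  The Registers member name is illustrative, as noted earlier.
 */
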
FRAME(_CPU_Context_restore,sp,0,ra)
        ADD a1,a0,zero
        j   _CPU_Context_switch_restore
        NOP
ENDFRAME(_CPU_Context_restore)

ASM_EXTERN(_ISR_Nest_level, SZ_INT)
ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
ASM_EXTERN(_Thread_Executing,SZ_INT)
.extern _Thread_Dispatch
.extern _ISR_Vector_table

/*  void __ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  void _ISR_Handler()
 *
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler while requirements for setting
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branches to _ISR_Handler.
 *
 */

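/*
 *  In C-like pseudocode, the flow implemented below is approximately the
 *  following.  The two context helper names are invented for readability;
 *  the vectoring routines and the dispatch bookkeeping variables appear
 *  literally in the code that follows.
 *
 *    save_integer_context_on_stack();                 hypothetical helper
 *    cause = CP0 Cause;  sr = CP0 Status (as saved above);
 *    if ( cause & CAUSE_EXCMASK ) {
 *      mips_vector_exceptions( frame );               a real exception
 *    } else if ( cause & sr & CAUSE_IPMASK ) {        an enabled interrupt
 *      _ISR_Nest_level++;
 *      _Thread_Dispatch_disable_level++;
 *      mips_vector_isr_handlers( frame );
 *      _ISR_Nest_level--;
 *      _Thread_Dispatch_disable_level--;
 *      if ( _Thread_Dispatch_disable_level == 0 && _ISR_Nest_level == 0 &&
 *           ( _Context_Switch_necessary || _ISR_Signals_to_thread_executing ) )
 *        _Thread_Dispatch();                          with interrupts re-enabled
 *    }
 *    restore_integer_context_and_return_from_exception();
 */
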
FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU    sp,sp,-EXCP_STACK_SIZE

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
        mflo  t0
        STREG t8, R_T8*R_SZ(sp)
        STREG t0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  t0
        STREG gp, R_GP*R_SZ(sp)
        STREG t0, R_MDHI*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)
        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        MFC0     t0,C0_SR
        MFC0     t1,C0_EPC
        STREG    t0,R_SR*R_SZ(sp)
        STREG    t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT
        lw t2, _Thread_Executing
        nop
        sw t2, 0x8001FFF0

        sw t0, 0x8001F050
        sw t1, 0x8001F054

        li t0, 0xdeadbeef
        li t1, 0xdeadbeef
        li t2, 0xdeadbeef

        sw ra, 0x8001F000
        sw v0, 0x8001F004
        sw v1, 0x8001F008
        sw a0, 0x8001F00c
        sw a1, 0x8001F010
        sw a2, 0x8001F014
        sw a3, 0x8001F018
        sw t0, 0x8001F01c
        sw t1, 0x8001F020
        sw t2, 0x8001F024
        sw t3, 0x8001F028
        sw t4, 0x8001F02c
        sw t5, 0x8001F030
        sw t6, 0x8001F034
        sw t7, 0x8001F038
        sw t8, 0x8001F03c
        sw t9, 0x8001F040
        sw gp, 0x8001F044
        sw fp, 0x8001F048
#endif

/* determine if an interrupt generated this exception */

        MFC0     k0,C0_CAUSE
        NOP

        and      k1,k0,CAUSE_EXCMASK
        beq      k1, 0, _ISR_Handler_1

_ISR_Handler_Exception:

        /* if we return from the exception, it is assumed nothing */
        /* bad is going on and we can continue to run normally */

        move     a0,sp
        jal      mips_vector_exceptions
        nop
        j        _ISR_Handler_exit
        nop

_ISR_Handler_1:

        MFC0     k1,C0_SR
        and      k0,CAUSE_IPMASK
        and      k0,k1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* Then where did it come from??? */

        beq      k0,zero,_ISR_Handler_exit

        li       t2,1           /* set a flag so we process interrupts */

  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */

  /*
   *  _ISR_Nest_level++;
   */
        LDREG  t0,_ISR_Nest_level
        NOP
        ADD    t0,t0,1
        STREG  t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,1
        STREG  t1,_Thread_Dispatch_disable_level

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */
        move     a0,sp
        jal      mips_vector_isr_handlers
        nop

  /*
   *  --_ISR_Nest_level;
   */
        LDREG  t2,_ISR_Nest_level
        NOP
        ADD    t2,t2,-1
        STREG  t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,-1
        STREG  t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        nop
  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        LDREG t0,_Context_Switch_necessary
        LDREG t1,_ISR_Signals_to_thread_executing
        NOP
        or    t0,t0,t1
        beq   t0,zero,_ISR_Handler_exit
        nop



#ifdef INSTRUMENT
        li      t0,0x11111111
        sw      t0,0x8001F104
#endif

  /* restore the interrupt state from the saved status register, in case
   * the ISR vectoring code did not, so that nested interrupts may occur
   * while we dispatch */

        LDREG    t0,R_SR*R_SZ(sp)
        NOP
        MTC0     t0,C0_SR
        rfe


        jal _Thread_Dispatch
        nop

#ifdef INSTRUMENT
        li      t0,0x22222222
        sw      t0,0x8001F100
#endif




  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
        LDREG    t0, R_SR*R_SZ(sp)
        NOP
        MTC0     t0, C0_SR

/* restore context from stack */

#ifdef INSTRUMENT
        lw      t0,_Thread_Executing
        nop
        sw      t0, 0x8001FFF4
#endif

        LDREG k0, R_MDLO*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        mtlo  k0
        LDREG k0, R_MDHI*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        mthi  k0
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)

#ifdef INSTRUMENT
        sw ra, 0x8001F000
        sw v0, 0x8001F004
        sw v1, 0x8001F008
        sw a0, 0x8001F00c
        sw a1, 0x8001F010
        sw a2, 0x8001F014
        sw a3, 0x8001F018
        sw t0, 0x8001F01c
        sw t1, 0x8001F020
        sw t2, 0x8001F024
        sw t3, 0x8001F028
        sw t4, 0x8001F02c
        sw t5, 0x8001F030
        sw t6, 0x8001F034
        sw t7, 0x8001F038
        sw t8, 0x8001F03c
        sw t9, 0x8001F040
        sw gp, 0x8001F044
        sw fp, 0x8001F048
#endif

        LDREG     k0, R_EPC*R_SZ(sp)

        .set noat
        LDREG AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE
        j         k0
        rfe
        nop

        .set    reorder
ENDFRAME(_ISR_Handler)

FRAME(mips_break,sp,0,ra)
#if 1
        break 0x0
        j mips_break
#else
        j ra
#endif
        nop
ENDFRAME(mips_break)
