source: rtems/c/src/exec/score/cpu/mips64orion/cpu_asm.S @ f198c63

Last change on this file since f198c63 was f198c63, checked in by Joel Sherrill <joel.sherrill@…>, on 09/06/96 at 18:11:41

new file for MIPS port by Craig Lebakken (lebakken@…) and
Derrick Ostertag (ostertag@…).

  • Property mode set to 100644
File size: 22.0 KB
/*  cpu_asm.S
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  Author:     Craig Lebakken <craigl@transition.com>
 *
 *  COPYRIGHT (c) 1996 by Transition Networks Inc.
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *      for any purpose is hereby granted without fee, provided that
 *      the above copyright notice and this notice appears in all
 *      copies, and that the name of Transition Networks not be used in
 *      advertising or publicity pertaining to distribution of the
 *      software without specific, written prior permission.
 *      Transition Networks makes no representations about the suitability
 *      of this software for any purpose.
 *
 *  Derived from c/src/exec/score/cpu/no_cpu/cpu_asm.s:
 *
 *  COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
 *  On-Line Applications Research Corporation (OAR).
 *  All rights assigned to U.S. Government, 1994.
 *
 *  This material may be reproduced by or for the U.S. Government pursuant
 *  to the copyright license under the clause at DFARS 252.227-7013.  This
 *  notice must appear in all copies of this file and its derivatives.
 *
 *  cpu_asm.c,v 1.5 1995/09/26 19:25:39 joel Exp
 */
/* @(#)cpu_asm.S       08/20/96     1.15 */

#include "cpu_asm.h"

#include "iregdef.h"
#include "idtcpu.h"

#define FRAME(name,frm_reg,offset,ret_reg)      \
        .globl  name;                           \
        .ent    name;                           \
name:;                                          \
        .frame  frm_reg,offset,ret_reg
#define ENDFRAME(name)                          \
        .end name


#define EXCP_STACK_SIZE (NREGS*R_SZ)

#if __ghs__
#define sd sw
#define ld lw
#define dmtc0 mtc0
#define dsll sll
#define dmfc0 mfc0
#endif

#if 1  /* 32 bit unsigned32 types */
#define sint sw
#define lint lw
#define stackadd addiu
#define intadd addu
#define SZ_INT 4
#define SZ_INT_POW2 2
#else /* 64 bit unsigned32 types */
#define sint sd
#define lint ld
#define stackadd daddiu
#define intadd daddu
#define SZ_INT 8
#define SZ_INT_POW2 3
#endif

#ifdef __GNUC__
#define EXTERN(x,size) .extern x,size
#else
#define EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12
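
/*
 *  For reference only: a sketch of the layout these offsets assume; the
 *  authoritative Context_Control definition lives in cpu.h.  The context
 *  switch code below stores each member as a doubleword (hence the *8
 *  scaling on the offsets).
 */
#if 0
typedef struct {
  unsigned64 s0, s1, s2, s3, s4, s5, s6, s7;  /* callee-saved registers */
  unsigned64 sp, fp, ra;                      /* stack ptr, frame ptr, return addr */
  unsigned64 c0_sr, c0_epc;                   /* CP0 status and exception PC */
} Context_Control;
#endif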

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31
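
/*
 *  Likewise a sketch only: the FP save/restore below uses swc1/lwc1 with
 *  *4 scaling, so Context_Control_fp is assumed to be 32 single-precision
 *  slots, one per FPU register.  cpu.h remains authoritative.
 */
#if 0
typedef struct {
  unsigned32 fp[32];   /* $f0..$f31, one 4-byte slot each */
} Context_Control_fp;
#endif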


/*PAGE
 *
 *  _CPU_ISR_Get_level
 */

#if 0
unsigned32 _CPU_ISR_Get_level( void )
{
  /*
   *  This routine returns the current interrupt level.
   */
}
#endif
/* return the current exception level for the 4650 */
FRAME(_CPU_ISR_Get_level,sp,0,ra)
        mfc0 v0,C0_SR
        nop
        andi v0,SR_EXL
        srl v0,1
        j ra
ENDFRAME(_CPU_ISR_Get_level)
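
/*
 *  Equivalent C, for illustration only (assumes SR_EXL is bit 1 of the
 *  status register, as the andi/srl pair above implies):
 *
 *    return ( sr & SR_EXL ) >> 1;    -- 1 if exception level set, else 0
 */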

FRAME(_CPU_ISR_Set_level,sp,0,ra)
        nop
        mfc0 a0,C0_SR
        nop
        andi a0,SR_EXL
        beqz a0,_CPU_ISR_Set_1          /* normalize a0 */
        nop
        li a0,1
_CPU_ISR_Set_1:
        beq v0,a0,_CPU_ISR_Set_exit     /* if (current_level != new_level ) */
        nop
        bnez a0,_CPU_ISR_Set_2
        nop
        nop
        mfc0 t0,C0_SR
        nop
        li t1,~SR_EXL
        and t0,t1
        nop
        mtc0 t0,C0_SR                   /* disable exception level */
        nop
        j ra
        nop
_CPU_ISR_Set_2:
        nop
        mfc0 t0,C0_SR
        nop
        li t1,~SR_IE
        and t0,t1
        nop
        mtc0 t0,C0_SR                   /* first disable ie bit (recommended) */
        nop
        ori t0,SR_EXL|SR_IE             /* enable exception level */
        nop
        mtc0 t0,C0_SR
        nop
_CPU_ISR_Set_exit:
        j ra
        nop
ENDFRAME(_CPU_ISR_Set_level)
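
/*
 *  Intended behavior of the routine above, per its inline comments,
 *  rendered as a C sketch (illustration only; the normalized level is
 *  kept in a0 and the comparison level in v0):
 *
 *    if ( new_level != 0 ) new_level = 1;   -- normalize
 *    if ( new_level == current_level ) return;
 *    if ( new_level == 0 )
 *      sr &= ~SR_EXL;                       -- leave exception level
 *    else {
 *      sr &= ~SR_IE;                        -- first disable IE (recommended)
 *      sr |= SR_EXL | SR_IE;                -- raise exception level
 *    }
 */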

/*
 *  _CPU_Context_save_fp
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 * void **fp_context_ptr
 * )
 * {
 * }
 */

FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noat
        ld a1,(a0)
        swc1 $f0,FP0_OFFSET*4(a1)
        swc1 $f1,FP1_OFFSET*4(a1)
        swc1 $f2,FP2_OFFSET*4(a1)
        swc1 $f3,FP3_OFFSET*4(a1)
        swc1 $f4,FP4_OFFSET*4(a1)
        swc1 $f5,FP5_OFFSET*4(a1)
        swc1 $f6,FP6_OFFSET*4(a1)
        swc1 $f7,FP7_OFFSET*4(a1)
        swc1 $f8,FP8_OFFSET*4(a1)
        swc1 $f9,FP9_OFFSET*4(a1)
        swc1 $f10,FP10_OFFSET*4(a1)
        swc1 $f11,FP11_OFFSET*4(a1)
        swc1 $f12,FP12_OFFSET*4(a1)
        swc1 $f13,FP13_OFFSET*4(a1)
        swc1 $f14,FP14_OFFSET*4(a1)
        swc1 $f15,FP15_OFFSET*4(a1)
        swc1 $f16,FP16_OFFSET*4(a1)
        swc1 $f17,FP17_OFFSET*4(a1)
        swc1 $f18,FP18_OFFSET*4(a1)
        swc1 $f19,FP19_OFFSET*4(a1)
        swc1 $f20,FP20_OFFSET*4(a1)
        swc1 $f21,FP21_OFFSET*4(a1)
        swc1 $f22,FP22_OFFSET*4(a1)
        swc1 $f23,FP23_OFFSET*4(a1)
        swc1 $f24,FP24_OFFSET*4(a1)
        swc1 $f25,FP25_OFFSET*4(a1)
        swc1 $f26,FP26_OFFSET*4(a1)
        swc1 $f27,FP27_OFFSET*4(a1)
        swc1 $f28,FP28_OFFSET*4(a1)
        swc1 $f29,FP29_OFFSET*4(a1)
        swc1 $f30,FP30_OFFSET*4(a1)
        swc1 $f31,FP31_OFFSET*4(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_save_fp)

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 * void **fp_context_ptr
 * )
 * {
 * }
 */

FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        ld a1,(a0)
        lwc1 $f0,FP0_OFFSET*4(a1)
        lwc1 $f1,FP1_OFFSET*4(a1)
        lwc1 $f2,FP2_OFFSET*4(a1)
        lwc1 $f3,FP3_OFFSET*4(a1)
        lwc1 $f4,FP4_OFFSET*4(a1)
        lwc1 $f5,FP5_OFFSET*4(a1)
        lwc1 $f6,FP6_OFFSET*4(a1)
        lwc1 $f7,FP7_OFFSET*4(a1)
        lwc1 $f8,FP8_OFFSET*4(a1)
        lwc1 $f9,FP9_OFFSET*4(a1)
        lwc1 $f10,FP10_OFFSET*4(a1)
        lwc1 $f11,FP11_OFFSET*4(a1)
        lwc1 $f12,FP12_OFFSET*4(a1)
        lwc1 $f13,FP13_OFFSET*4(a1)
        lwc1 $f14,FP14_OFFSET*4(a1)
        lwc1 $f15,FP15_OFFSET*4(a1)
        lwc1 $f16,FP16_OFFSET*4(a1)
        lwc1 $f17,FP17_OFFSET*4(a1)
        lwc1 $f18,FP18_OFFSET*4(a1)
        lwc1 $f19,FP19_OFFSET*4(a1)
        lwc1 $f20,FP20_OFFSET*4(a1)
        lwc1 $f21,FP21_OFFSET*4(a1)
        lwc1 $f22,FP22_OFFSET*4(a1)
        lwc1 $f23,FP23_OFFSET*4(a1)
        lwc1 $f24,FP24_OFFSET*4(a1)
        lwc1 $f25,FP25_OFFSET*4(a1)
        lwc1 $f26,FP26_OFFSET*4(a1)
        lwc1 $f27,FP27_OFFSET*4(a1)
        lwc1 $f28,FP28_OFFSET*4(a1)
        lwc1 $f29,FP29_OFFSET*4(a1)
        lwc1 $f30,FP30_OFFSET*4(a1)
        lwc1 $f31,FP31_OFFSET*4(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_restore_fp)

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 * Context_Control  *run,
 * Context_Control  *heir
 * )
 * {
 * }
 */
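
/*
 *  C sketch of the switch below (illustration only; get_sr()/set_sr()
 *  stand in for the mfc0/mtc0 accesses):
 *
 *    sr = get_sr();
 *    run->c0_sr = sr;                  -- save status register
 *    set_sr( sr & ~SR_IE );            -- first disable IE (recommended)
 *    set_sr( sr | SR_EXL | SR_IE );    -- raise exception level: ints off
 *    save ra/sp/fp/s0..s7/EPC into *run, reload them from *heir;
 *    if ( !(heir->c0_sr & SR_EXL) )
 *      set_sr( get_sr() & ~SR_EXL );   -- heir runs with interrupts enabled
 */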

FRAME(_CPU_Context_switch,sp,0,ra)

        mfc0 t0,C0_SR
        li t1,~SR_IE
        sd t0,C0_SR_OFFSET*8(a0)        /* save status register */
        and t0,t1
        mtc0 t0,C0_SR                   /* first disable ie bit (recommended) */
        ori t0,SR_EXL|SR_IE             /* enable exception level to disable interrupts */
        mtc0 t0,C0_SR

        sd ra,RA_OFFSET*8(a0)           /* save current context */
        sd sp,SP_OFFSET*8(a0)
        sd fp,FP_OFFSET*8(a0)
        sd s0,S0_OFFSET*8(a0)
        sd s1,S1_OFFSET*8(a0)
        sd s2,S2_OFFSET*8(a0)
        sd s3,S3_OFFSET*8(a0)
        sd s4,S4_OFFSET*8(a0)
        sd s5,S5_OFFSET*8(a0)
        sd s6,S6_OFFSET*8(a0)
        sd s7,S7_OFFSET*8(a0)
        dmfc0 t0,C0_EPC
        sd t0,C0_EPC_OFFSET*8(a0)

_CPU_Context_switch_restore:
        ld s0,S0_OFFSET*8(a1)           /* restore context */
        ld s1,S1_OFFSET*8(a1)
        ld s2,S2_OFFSET*8(a1)
        ld s3,S3_OFFSET*8(a1)
        ld s4,S4_OFFSET*8(a1)
        ld s5,S5_OFFSET*8(a1)
        ld s6,S6_OFFSET*8(a1)
        ld s7,S7_OFFSET*8(a1)
        ld fp,FP_OFFSET*8(a1)
        ld sp,SP_OFFSET*8(a1)
        ld ra,RA_OFFSET*8(a1)
        ld t0,C0_EPC_OFFSET*8(a1)
        dmtc0 t0,C0_EPC
        ld t0,C0_SR_OFFSET*8(a1)
        andi t0,SR_EXL
        bnez t0,_CPU_Context_1          /* set exception level from restore context */
        li t0,~SR_EXL
        mfc0 t1,C0_SR
        nop
        and t1,t0
        mtc0 t1,C0_SR
_CPU_Context_1:
        j ra
        nop
ENDFRAME(_CPU_Context_switch)

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

#if 0
void _CPU_Context_restore(
  Context_Control *new_context
)
{
}
#endif

FRAME(_CPU_Context_restore,sp,0,ra)
        dadd a1,a0,zero
        j _CPU_Context_switch_restore
        nop
ENDFRAME(_CPU_Context_restore)

EXTERN(_ISR_Nest_level, SZ_INT)
EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
EXTERN(_Context_Switch_necessary,SZ_INT)
EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
.extern _Thread_Dispatch
.extern _ISR_Vector_table

/*  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 */

#if 0
void _ISR_Handler()
{
   /*
    *  This discussion ignores a lot of the ugly details in a real
    *  implementation such as saving enough registers/state to be
    *  able to do something real.  Keep in mind that the goal is
    *  to invoke a user's ISR handler which is written in C and
    *  uses a certain set of registers.
    *
    *  Also note that the exact order is to a large extent flexible.
    *  Hardware will dictate a sequence for a certain subset of
    *  _ISR_Handler while requirements for setting
    */

  /*
   *  At entry to "common" _ISR_Handler, the vector number must be
   *  available.  On some CPUs the hardware puts either the vector
   *  number or the offset into the vector table for this ISR in a
   *  known place.  If the hardware does not give us this information,
   *  then the assembly portion of RTEMS for this port will contain
   *  a set of distinct interrupt entry points which somehow place
   *  the vector number in a known place (which is safe if another
   *  interrupt nests this one) and branches to _ISR_Handler.
   *
   */
#endif
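/*
 *  Overall shape of the handler below, as a sketch (illustration only):
 *
 *    save scratch registers on the stack
 *    if ( exception is not an external interrupt )  goto prom_exit;
 *    if ( interrupt is not enabled in SR )          goto quick_exit;
 *    _ISR_Nest_level++;  _Thread_Dispatch_disable_level++;
 *    (*_ISR_Vector_table[ vector ])( vector );
 *    --_ISR_Nest_level;  --_Thread_Dispatch_disable_level;
 *    if ( dispatching enabled and a switch or signal is pending )
 *      _Thread_Dispatch();
 *    restore scratch registers and return from exception
 */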
FRAME(_ISR_Handler,sp,0,ra)
.set noreorder
#if USE_IDTKIT
/* IDT/Kit incorrectly adds 4 to EPC before returning.  This compensates */
        lreg    k0, R_EPC*R_SZ(sp)
        daddiu    k0,k0,-4
        sreg    k0, R_EPC*R_SZ(sp)
        lreg    k0, R_CAUSE*R_SZ(sp)
        li      k1, ~CAUSE_BD
        and     k0, k1
        sreg    k0, R_CAUSE*R_SZ(sp)
#endif

/* save registers not already saved by IDT/sim */
        stackadd sp,sp,-EXCP_STACK_SIZE /* allocate exception stack frame */

        sreg    ra, R_RA*R_SZ(sp)
        sreg    v0, R_V0*R_SZ(sp)
        sreg    v1, R_V1*R_SZ(sp)
        sreg    a0, R_A0*R_SZ(sp)
        sreg    a1, R_A1*R_SZ(sp)
        sreg    a2, R_A2*R_SZ(sp)
        sreg    a3, R_A3*R_SZ(sp)
        sreg    t0, R_T0*R_SZ(sp)
        sreg    t1, R_T1*R_SZ(sp)
        sreg    t2, R_T2*R_SZ(sp)
        sreg    t3, R_T3*R_SZ(sp)
        sreg    t4, R_T4*R_SZ(sp)
        sreg    t5, R_T5*R_SZ(sp)
        sreg    t6, R_T6*R_SZ(sp)
        sreg    t7, R_T7*R_SZ(sp)
        mflo    k0
        sreg    t8, R_T8*R_SZ(sp)
        sreg    k0, R_MDLO*R_SZ(sp)
        sreg    t9, R_T9*R_SZ(sp)
        mfhi    k0
        sreg    gp, R_GP*R_SZ(sp)
        sreg    fp, R_FP*R_SZ(sp)
        sreg    k0, R_MDHI*R_SZ(sp)
        .set noat
        sreg    AT, R_AT*R_SZ(sp)
        .set at

        stackadd sp,sp,-40              /* allocate frame and save ra */
        sd ra,32(sp)

/* determine if an interrupt generated this exception */
        mfc0 k0,C0_CAUSE
        and k1,k0,CAUSE_EXCMASK
        bnez k1,_ISR_Handler_prom_exit /* not an external interrupt, pass exception to Monitor */
        mfc0 k1,C0_SR
        and k0,k1
        and k0,CAUSE_IPMASK
        beq k0,zero,_ISR_Handler_quick_exit /* external interrupt not enabled, ignore */
        nop

  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */
#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
        lint t0,_ISR_Nest_level
        beq t0, zero,  _ISR_Handler_1
        nop
        /* switch stacks */
_ISR_Handler_1:
#else
        lint t0,_ISR_Nest_level
#endif
  /*
   *  _ISR_Nest_level++;
   */
        addi t0,t0,1
        sint t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        lint t1,_Thread_Dispatch_disable_level
        addi t1,t1,1
        sint t1,_Thread_Dispatch_disable_level
#if 0
        nop
        j _ISR_Handler_4
        nop
  /*
   *  while ( interrupts_pending(cause_reg) ) {
   *     vector = BITFIELD_TO_INDEX(cause_reg);
   *     (*_ISR_Vector_table[ vector ])( vector );
   *  }
   */
_ISR_Handler_2:
/* software interrupt priorities can be applied here */
        li t1,-1
/* convert bit field into interrupt index */
_ISR_Handler_3:
        andi t2,t0,1
        addi t1,1
        beql t2,zero,_ISR_Handler_3
        dsrl t0,1
        li t1,7
        dsll t1,3                       /* convert index to byte offset (*8) */
        la t3,_ISR_Vector_table
        intadd t1,t3
        lint t1,(t1)
        jalr t1
        nop
        j _ISR_Handler_5
        nop
_ISR_Handler_4:
        mfc0 t0,C0_CAUSE
        andi t0,CAUSE_IPMASK
        bne t0,zero,_ISR_Handler_2
        dsrl t0,t0,8
_ISR_Handler_5:
#else
        nop
        li t1,7
        dsll t1,t1,SZ_INT_POW2
        la t3,_ISR_Vector_table
        intadd t1,t3
        lint t1,(t1)
        jalr t1
        nop
#endif
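  /*
   *  NOTE: the active path above dispatches vector 7 unconditionally
   *  (li t1,7); the disabled code before it sketches the general case,
   *  which walks the pending-interrupt bits of the cause register,
   *  roughly:
   *
   *    cause = ( cause_reg & CAUSE_IPMASK ) >> 8;
   *    for ( vector = 0; cause; cause >>= 1, vector++ )
   *      if ( cause & 1 )
   *        (*_ISR_Vector_table[ vector ])( vector );
   */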
  /*
   *  --_ISR_Nest_level;
   */
        lint t2,_ISR_Nest_level
        addi t2,t2,-1
        sint t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        lint t1,_Thread_Dispatch_disable_level
        addi t1,t1,-1
        sint t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        nop
  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        lint t0,_Context_Switch_necessary
        lint t1,_ISR_Signals_to_thread_executing
        or t0,t0,t1
        beq t0,zero,_ISR_Handler_exit
        nop

  /*
   *  call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
   */
        jal _Thread_Dispatch
        nop
  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case)":
   *  prepare to get out of interrupt
   *  return from interrupt
   */
_ISR_Handler_exit:
        ld ra,32(sp)
        stackadd sp,sp,40

/* restore interrupt context from stack */
        lreg    k0, R_MDLO*R_SZ(sp)
        mtlo    k0
        lreg    k0, R_MDHI*R_SZ(sp)
        lreg    a2, R_A2*R_SZ(sp)
        mthi    k0
        lreg    a3, R_A3*R_SZ(sp)
        lreg    t0, R_T0*R_SZ(sp)
        lreg    t1, R_T1*R_SZ(sp)
        lreg    t2, R_T2*R_SZ(sp)
        lreg    t3, R_T3*R_SZ(sp)
        lreg    t4, R_T4*R_SZ(sp)
        lreg    t5, R_T5*R_SZ(sp)
        lreg    t6, R_T6*R_SZ(sp)
        lreg    t7, R_T7*R_SZ(sp)
        lreg    t8, R_T8*R_SZ(sp)
        lreg    t9, R_T9*R_SZ(sp)
        lreg    gp, R_GP*R_SZ(sp)
        lreg    fp, R_FP*R_SZ(sp)
        lreg    ra, R_RA*R_SZ(sp)
        lreg    a0, R_A0*R_SZ(sp)
        lreg    a1, R_A1*R_SZ(sp)
        lreg    v1, R_V1*R_SZ(sp)
        lreg    v0, R_V0*R_SZ(sp)
        .set noat
        lreg    AT, R_AT*R_SZ(sp)
        .set at

        stackadd sp,sp,EXCP_STACK_SIZE /* release exception stack frame */

#if USE_IDTKIT
/* we handled exception, so return non-zero value */
        li v0,1
#endif

_ISR_Handler_quick_exit:
#ifdef USE_IDTKIT
        j ra
#else
        eret
#endif
        nop

_ISR_Handler_prom_exit:
#ifdef CPU_R3000
        la      k0, (R_VEC+((48)*8))
#endif

#ifdef CPU_R4000
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */
#endif
        j       k0
        nop

       .set    reorder

ENDFRAME(_ISR_Handler)


FRAME(mips_enable_interrupts,sp,0,ra)
        mfc0 t0,C0_SR           /* get status reg */
        nop
        or t0,t0,a0
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop
ENDFRAME(mips_enable_interrupts)

FRAME(mips_disable_interrupts,sp,0,ra)
        mfc0 v0,C0_SR           /* get status reg */
        li t1,SR_IMASK          /* t1 = load interrupt mask word */
        not t0,t1               /* t0 = ~t1 */
        and t0,v0               /* clear imask bits */
        mtc0 t0,C0_SR           /* save status reg */
        and v0,t1               /* mask return value (only return imask bits) */
        jr ra
        nop
ENDFRAME(mips_disable_interrupts)
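
/*
 *  Typical usage (illustration): mips_disable_interrupts() returns the
 *  previous interrupt mask bits, which can be handed back to
 *  mips_enable_interrupts() to restore them:
 *
 *    level = mips_disable_interrupts();
 *    ... critical section ...
 *    mips_enable_interrupts( level );
 */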

FRAME(mips_enable_global_interrupts,sp,0,ra)
        mfc0 t0,C0_SR           /* get status reg */
        nop
        ori t0,SR_IE
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop
ENDFRAME(mips_enable_global_interrupts)

FRAME(mips_disable_global_interrupts,sp,0,ra)
        li t1,SR_IE
        mfc0 t0,C0_SR           /* get status reg */
        not t1
        and t0,t1
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop
ENDFRAME(mips_disable_global_interrupts)

/* return the value of the status register in v0.  Used for debugging */
FRAME(mips_get_sr,sp,0,ra)
        mfc0 v0,C0_SR
        j ra
        nop
ENDFRAME(mips_get_sr)

FRAME(mips_break,sp,0,ra)
#if 1
        break 0x0
        j mips_break
#else
        j ra
#endif
        nop
ENDFRAME(mips_break)

/*PAGE
 *
 *  _CPU_Thread_Idle_body
 *
 *  NOTES:
 *
 *  1. This is the same as the regular CPU independent algorithm.
 *
 *  2. If you implement this using a "halt", "idle", or "shutdown"
 *     instruction, then don't forget to put it in an infinite loop.
 *
 *  3. Be warned. Some processors with onboard DMA have been known
 *     to stop the DMA if the CPU were put in IDLE mode.  This might
 *     also be a problem with other on-chip peripherals.  So use this
 *     hook with caution.
 */

FRAME(_CPU_Thread_Idle_body,sp,0,ra)
        wait                    /* enter low power mode */
        j _CPU_Thread_Idle_body
        nop
ENDFRAME(_CPU_Thread_Idle_body)

#define VEC_CODE_LENGTH 10*4

/**************************************************************************
**
**      init_exc_vecs() - moves the exception code into the addresses
**                        reserved for exception vectors
**
**      UTLB Miss exception vector at address 0x80000000
**
**      General exception vector at address 0x80000080
**
**      RESET exception vector is at address 0xbfc00000
**
***************************************************************************/

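/*
 *  Conceptually, each vector is installed by copying its stub into place
 *  and then flushing the copy from the cache (sketch only):
 *
 *    memcpy( (void *) E_VEC, exc_norm_code, VEC_CODE_LENGTH );
 *    clear_cache( E_VEC, VEC_CODE_LENGTH );
 */
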
#define INITEXCFRM ((2*4)+4)            /* ra + 2 arguments */
FRAME(init_exc_vecs,sp,0,ra)
/* This code yanked from SIM */
#if defined(CPU_R3000)
        .set    noreorder
        la      t1,exc_utlb_code
        la      t2,exc_norm_code
        li      t3,UT_VEC
        li      t4,E_VEC
        li      t5,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        lw      t7,0(t2)
        sw      t6,0(t3)
        sw      t7,0(t4)
        addiu   t1,4
        addiu   t3,4
        addiu   t4,4
        subu    t5,4
        bne     t5,zero,1b
        addiu   t2,4
        move    t5,ra           # assumes clear_cache doesn't use t5
        li      a0,UT_VEC
        jal     clear_cache
        li      a1,VEC_CODE_LENGTH
        nop
        li      a0,E_VEC
        jal     clear_cache
        li      a1,VEC_CODE_LENGTH
        move    ra,t5           # restore ra
        j       ra
        nop
        .set    reorder
#endif
#if defined(CPU_R4000)
        .set reorder
        move    t5,ra           # assumes clear_cache doesn't use t5

        /* TLB exception vector */
        la      t1,exc_tlb_code
        li      t2,T_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        li      a0,T_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        la      t1,exc_xtlb_code
        li      t2,X_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        /* extended TLB exception vector */
        li      a0,X_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        /* cache error exception vector */
        la      t1,exc_cache_code
        li      t2,C_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        li      a0,C_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        /* normal exception vector */
        la      t1,exc_norm_code
        li      t2,E_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        li      a0,E_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        move    ra,t5           # restore ra
        j       ra
#endif
ENDFRAME(init_exc_vecs)


#if defined(CPU_R4000)
FRAME(exc_tlb_code,sp,0,ra)
#ifdef CPU_R3000
        la      k0, (R_VEC+((48)*8))
#endif

#ifdef CPU_R4000
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */
#endif
        j       k0
        nop

ENDFRAME(exc_tlb_code)


FRAME(exc_xtlb_code,sp,0,ra)
#ifdef CPU_R3000
        la      k0, (R_VEC+((48)*8))
#endif

#ifdef CPU_R4000
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */
#endif
        j       k0
        nop

ENDFRAME(exc_xtlb_code)


FRAME(exc_cache_code,sp,0,ra)
#ifdef CPU_R3000
        la      k0, (R_VEC+((48)*8))
#endif

#ifdef CPU_R4000
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */
#endif
        j       k0
        nop

ENDFRAME(exc_cache_code)


FRAME(exc_norm_code,sp,0,ra)
        la      k0, _ISR_Handler /* generic external int hndlr */
        j       k0
        nop
        subu    sp, EXCP_STACK_SIZE             /* set up local stack frame */
ENDFRAME(exc_norm_code)
#endif

/**************************************************************************
**
** enable_int(mask) - enables interrupts - mask is positioned so it only
**                      needs to be or'ed into the status reg.  This
**                      also sets the global interrupt enable (IE) bit,
**                      so use caution when invoking it in the middle
**                      of a debugging session where the client may have
**                      nested interrupts.
**
****************************************************************************/
FRAME(enable_int,sp,0,ra)
        .set    noreorder
        mfc0    t0,C0_SR
        or      a0,1
        or      t0,a0
        mtc0    t0,C0_SR
        j       ra
        nop
        .set    reorder
ENDFRAME(enable_int)


/***************************************************************************
**
**      disable_int(mask) - disable the interrupt - mask is the complement
**                          of the bits to be cleared - i.e. to clear ext int
**                          5 the mask would be - 0xffff7fff
**
****************************************************************************/
FRAME(disable_int,sp,0,ra)
        .set    noreorder
        mfc0    t0,C0_SR
        nop
        and     t0,a0
        mtc0    t0,C0_SR
        j       ra
        nop
        .set    reorder
ENDFRAME(disable_int)
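
/*
 *  Typical usage (illustration), following the mask convention described
 *  above for external interrupt 5:
 *
 *    disable_int( 0xffff7fff );   -- clear ext int 5
 *    ...
 *    enable_int( 0x00008000 );    -- re-enable ext int 5 (also sets IE)
 */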
972