source: rtems/cpukit/score/cpu/mips64orion/cpu_asm.S @ 5356c03

Last change on this file since 5356c03 was 0e7da15, checked in by Joel Sherrill <joel.sherrill@…>, on 07/11/00 at 21:38:41

Removed no cpu references.

/*  cpu_asm.S
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  Author:     Craig Lebakken <craigl@transition.com>
 *
 *  COPYRIGHT (c) 1996 by Transition Networks Inc.
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *      for any purpose is hereby granted without fee, provided that
 *      the above copyright notice and this notice appears in all
 *      copies, and that the name of Transition Networks not be used in
 *      advertising or publicity pertaining to distribution of the
 *      software without specific, written prior permission.
 *      Transition Networks makes no representations about the suitability
 *      of this software for any purpose.
 *
 *  Derived from source copyrighted as follows:
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 *  $Id$
 */
/* @(#)cpu_asm.S       08/20/96     1.15 */

#include "cpu_asm.h"

#include "iregdef.h"
#include "idtcpu.h"

#define FRAME(name,frm_reg,offset,ret_reg)      \
        .globl  name;                           \
        .ent    name;                           \
name:;                                          \
        .frame  frm_reg,offset,ret_reg
#define ENDFRAME(name)                          \
        .end name


#define EXCP_STACK_SIZE (NREGS*R_SZ)

#if __ghs__
#define sd sw
#define ld lw
#define dmtc0 mtc0
#define dsll sll
#define dmfc0 mfc0
#endif

#if 1  /* 32 bit unsigned32 types */
#define sint sw
#define lint lw
#define stackadd addiu
#define intadd addu
#define SZ_INT 4
#define SZ_INT_POW2 2
#else /* 64 bit unsigned32 types */
#define sint sd
#define lint ld
#define stackadd daddiu
#define intadd daddu
#define SZ_INT 8
#define SZ_INT_POW2 3
#endif

#ifdef __GNUC__
#define EXTERN(x,size) .extern x,size
#else
#define EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12

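/*
 *  For reference, a minimal sketch of the layout these offsets assume
 *  (one 64-bit doubleword per slot, since _CPU_Context_switch below
 *  accesses them as OFFSET*8).  This is illustrative only; the real
 *  definition lives in cpu.h:
 *
 *      typedef struct {
 *          unsigned64 s0, s1, s2, s3, s4, s5, s6, s7;  -- callee-saved
 *          unsigned64 sp, fp, ra;                      -- stack/frame/return
 *          unsigned64 c0_sr, c0_epc;                   -- CP0 status and EPC
 *      } Context_Control;
 */
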
/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31

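/*
 *  Similarly, a sketch of the Context_Control_fp layout assumed below
 *  (32 single-word slots, since the save/restore code uses swc1/lwc1
 *  at OFFSET*4); again the real definition lives in cpu.h:
 *
 *      typedef struct {
 *          unsigned32 fp[32];   -- $f0 .. $f31
 *      } Context_Control_fp;
 */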

/*PAGE
 *
 *  _CPU_ISR_Get_level
 */

#if 0
unsigned32 _CPU_ISR_Get_level( void )
{
  /*
   *  This routine returns the current interrupt level.
   */
}
#endif
/* return the current exception level for the 4650 */
FRAME(_CPU_ISR_Get_level,sp,0,ra)
        mfc0 v0,C0_SR
        nop
        andi v0,SR_EXL
        srl v0,1
        j ra
ENDFRAME(_CPU_ISR_Get_level)

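/*
 *  In C, the routine above amounts to the following sketch (SR_EXL is
 *  assumed to be bit 1 of the status register, hence the shift by one;
 *  mips_get_sr is defined later in this file):
 */
#if 0
unsigned32 _CPU_ISR_Get_level( void )
{
  return ( mips_get_sr() & SR_EXL ) >> 1;   /* 1 = exceptions masked */
}
#endif
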
FRAME(_CPU_ISR_Set_level,sp,0,ra)
        nop
        mfc0 v0,C0_SR
        nop
        andi v0,SR_EXL
        beqz v0,_CPU_ISR_Set_1          /* normalize v0 */
        nop
        li v0,1
_CPU_ISR_Set_1:
        beq v0,a0,_CPU_ISR_Set_exit     /* exit if current_level == new_level */
        nop
        bnez a0,_CPU_ISR_Set_2
        nop
        nop
        mfc0 t0,C0_SR
        nop
        li t1,~SR_EXL
        and t0,t1
        nop
        mtc0 t0,C0_SR                   /* disable exception level */
        nop
        j ra
        nop
_CPU_ISR_Set_2:
        nop
        mfc0 t0,C0_SR
        nop
        li t1,~SR_IE
        and t0,t1
        nop
        mtc0 t0,C0_SR                   /* first disable ie bit (recommended) */
        nop
        ori t0,SR_EXL|SR_IE             /* enable exception level */
        nop
        mtc0 t0,C0_SR
        nop
_CPU_ISR_Set_exit:
        j ra
        nop
ENDFRAME(_CPU_ISR_Set_level)

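/*
 *  A C sketch of _CPU_ISR_Set_level above (illustrative only; the
 *  helper mips_set_sr is hypothetical -- the assembly writes C0_SR
 *  directly):
 */
#if 0
void _CPU_ISR_Set_level( unsigned32 new_level )
{
  unsigned32 sr = mips_get_sr();

  if ( ((sr & SR_EXL) ? 1 : 0) == new_level )
    return;                                /* already at requested level */

  if ( new_level == 0 )
    mips_set_sr( sr & ~SR_EXL );           /* leave exception level */
  else {
    mips_set_sr( sr & ~SR_IE );            /* first disable IE (recommended) */
    mips_set_sr( sr | SR_EXL | SR_IE );    /* then raise exception level */
  }
}
#endif
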
/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to save the FP context
 *  to is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 * void **fp_context_ptr
 * )
 * {
 * }
 */

FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noat
        ld a1,(a0)
        swc1 $f0,FP0_OFFSET*4(a1)
        swc1 $f1,FP1_OFFSET*4(a1)
        swc1 $f2,FP2_OFFSET*4(a1)
        swc1 $f3,FP3_OFFSET*4(a1)
        swc1 $f4,FP4_OFFSET*4(a1)
        swc1 $f5,FP5_OFFSET*4(a1)
        swc1 $f6,FP6_OFFSET*4(a1)
        swc1 $f7,FP7_OFFSET*4(a1)
        swc1 $f8,FP8_OFFSET*4(a1)
        swc1 $f9,FP9_OFFSET*4(a1)
        swc1 $f10,FP10_OFFSET*4(a1)
        swc1 $f11,FP11_OFFSET*4(a1)
        swc1 $f12,FP12_OFFSET*4(a1)
        swc1 $f13,FP13_OFFSET*4(a1)
        swc1 $f14,FP14_OFFSET*4(a1)
        swc1 $f15,FP15_OFFSET*4(a1)
        swc1 $f16,FP16_OFFSET*4(a1)
        swc1 $f17,FP17_OFFSET*4(a1)
        swc1 $f18,FP18_OFFSET*4(a1)
        swc1 $f19,FP19_OFFSET*4(a1)
        swc1 $f20,FP20_OFFSET*4(a1)
        swc1 $f21,FP21_OFFSET*4(a1)
        swc1 $f22,FP22_OFFSET*4(a1)
        swc1 $f23,FP23_OFFSET*4(a1)
        swc1 $f24,FP24_OFFSET*4(a1)
        swc1 $f25,FP25_OFFSET*4(a1)
        swc1 $f26,FP26_OFFSET*4(a1)
        swc1 $f27,FP27_OFFSET*4(a1)
        swc1 $f28,FP28_OFFSET*4(a1)
        swc1 $f29,FP29_OFFSET*4(a1)
        swc1 $f30,FP30_OFFSET*4(a1)
        swc1 $f31,FP31_OFFSET*4(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_save_fp)

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 * void **fp_context_ptr
 * )
 * {
 * }
 */

FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        ld a1,(a0)
        lwc1 $f0,FP0_OFFSET*4(a1)
        lwc1 $f1,FP1_OFFSET*4(a1)
        lwc1 $f2,FP2_OFFSET*4(a1)
        lwc1 $f3,FP3_OFFSET*4(a1)
        lwc1 $f4,FP4_OFFSET*4(a1)
        lwc1 $f5,FP5_OFFSET*4(a1)
        lwc1 $f6,FP6_OFFSET*4(a1)
        lwc1 $f7,FP7_OFFSET*4(a1)
        lwc1 $f8,FP8_OFFSET*4(a1)
        lwc1 $f9,FP9_OFFSET*4(a1)
        lwc1 $f10,FP10_OFFSET*4(a1)
        lwc1 $f11,FP11_OFFSET*4(a1)
        lwc1 $f12,FP12_OFFSET*4(a1)
        lwc1 $f13,FP13_OFFSET*4(a1)
        lwc1 $f14,FP14_OFFSET*4(a1)
        lwc1 $f15,FP15_OFFSET*4(a1)
        lwc1 $f16,FP16_OFFSET*4(a1)
        lwc1 $f17,FP17_OFFSET*4(a1)
        lwc1 $f18,FP18_OFFSET*4(a1)
        lwc1 $f19,FP19_OFFSET*4(a1)
        lwc1 $f20,FP20_OFFSET*4(a1)
        lwc1 $f21,FP21_OFFSET*4(a1)
        lwc1 $f22,FP22_OFFSET*4(a1)
        lwc1 $f23,FP23_OFFSET*4(a1)
        lwc1 $f24,FP24_OFFSET*4(a1)
        lwc1 $f25,FP25_OFFSET*4(a1)
        lwc1 $f26,FP26_OFFSET*4(a1)
        lwc1 $f27,FP27_OFFSET*4(a1)
        lwc1 $f28,FP28_OFFSET*4(a1)
        lwc1 $f29,FP29_OFFSET*4(a1)
        lwc1 $f30,FP30_OFFSET*4(a1)
        lwc1 $f31,FP31_OFFSET*4(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_restore_fp)

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 * Context_Control  *run,
 * Context_Control  *heir
 * )
 * {
 * }
 */

FRAME(_CPU_Context_switch,sp,0,ra)

        mfc0 t0,C0_SR
        li t1,~SR_IE
        sd t0,C0_SR_OFFSET*8(a0)        /* save status register */
        and t0,t1
        mtc0 t0,C0_SR                   /* first disable ie bit (recommended) */
        ori t0,SR_EXL|SR_IE             /* enable exception level to disable interrupts */
        mtc0 t0,C0_SR

        sd ra,RA_OFFSET*8(a0)           /* save current context */
        sd sp,SP_OFFSET*8(a0)
        sd fp,FP_OFFSET*8(a0)
        sd s0,S0_OFFSET*8(a0)
        sd s1,S1_OFFSET*8(a0)
        sd s2,S2_OFFSET*8(a0)
        sd s3,S3_OFFSET*8(a0)
        sd s4,S4_OFFSET*8(a0)
        sd s5,S5_OFFSET*8(a0)
        sd s6,S6_OFFSET*8(a0)
        sd s7,S7_OFFSET*8(a0)
        dmfc0 t0,C0_EPC
        sd t0,C0_EPC_OFFSET*8(a0)

_CPU_Context_switch_restore:
        ld s0,S0_OFFSET*8(a1)           /* restore context */
        ld s1,S1_OFFSET*8(a1)
        ld s2,S2_OFFSET*8(a1)
        ld s3,S3_OFFSET*8(a1)
        ld s4,S4_OFFSET*8(a1)
        ld s5,S5_OFFSET*8(a1)
        ld s6,S6_OFFSET*8(a1)
        ld s7,S7_OFFSET*8(a1)
        ld fp,FP_OFFSET*8(a1)
        ld sp,SP_OFFSET*8(a1)
        ld ra,RA_OFFSET*8(a1)
        ld t0,C0_EPC_OFFSET*8(a1)
        dmtc0 t0,C0_EPC
        ld t0,C0_SR_OFFSET*8(a1)
        andi t0,SR_EXL
        bnez t0,_CPU_Context_1          /* set exception level from restore context */
        li t0,~SR_EXL
        mfc0 t1,C0_SR
        nop
        and t1,t0
        mtc0 t1,C0_SR
_CPU_Context_1:
        j ra
        nop
ENDFRAME(_CPU_Context_switch)

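/*
 *  The switch above in C-like pseudo-code (illustrative only):
 *
 *      run->c0_sr = SR;                 -- save status register
 *      SR = SR & ~SR_IE;                -- first disable IE (recommended)
 *      SR = SR | SR_EXL | SR_IE;        -- then raise exception level
 *      save s0-s7, sp, fp, ra and EPC into *run;
 *      load s0-s7, sp, fp, ra and EPC from *heir;
 *      if ( !(heir->c0_sr & SR_EXL) )
 *        SR = SR & ~SR_EXL;             -- heir runs with EXL clear
 */
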
/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

#if 0
void _CPU_Context_restore(
  Context_Control *new_context
)
{
}
#endif

FRAME(_CPU_Context_restore,sp,0,ra)
        dadd a1,a0,zero
        j _CPU_Context_switch_restore
        nop
ENDFRAME(_CPU_Context_restore)

EXTERN(_ISR_Nest_level, SZ_INT)
EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
EXTERN(_Context_Switch_necessary,SZ_INT)
EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
.extern _Thread_Dispatch
.extern _ISR_Vector_table

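/*
 *  The vector table referenced below is an array of C handlers indexed
 *  by vector number; a sketch of the shape assumed here (the real
 *  declarations live in the RTEMS headers):
 *
 *      typedef void ( *ISR_Handler )( unsigned32 vector );
 *      extern ISR_Handler _ISR_Vector_table[];
 */
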
/*  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 */

#if 0
void _ISR_Handler()
{
   /*
    *  This discussion ignores a lot of the ugly details in a real
    *  implementation such as saving enough registers/state to be
    *  able to do something real.  Keep in mind that the goal is
    *  to invoke a user's ISR handler which is written in C and
    *  uses a certain set of registers.
    *
    *  Also note that the exact order is to a large extent flexible.
    *  Hardware will dictate a sequence for a certain subset of
    *  _ISR_Handler while requirements for setting the RTEMS state
    *  variables are more flexible.
    */

  /*
   *  At entry to "common" _ISR_Handler, the vector number must be
   *  available.  On some CPUs the hardware puts either the vector
   *  number or the offset into the vector table for this ISR in a
   *  known place.  If the hardware does not give us this information,
   *  then the assembly portion of RTEMS for this port will contain
   *  a set of distinct interrupt entry points which somehow place
   *  the vector number in a known place (which is safe if another
   *  interrupt nests this one) and branches to _ISR_Handler.
   *
   */
#endif
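/*
 *  Sketch of the flow implemented below (illustrative only; note that
 *  the enabled branch in the dispatch code hard-codes vector 7):
 *
 *      save scratch registers, HI/LO and AT on the stack;
 *      if ( CAUSE.ExcCode != 0 )                -- not an interrupt
 *        goto _ISR_Handler_prom_exit;
 *      if ( (CAUSE & SR & CAUSE_IPMASK) == 0 )  -- all sources masked
 *        goto _ISR_Handler_quick_exit;
 *      _ISR_Nest_level++;
 *      _Thread_Dispatch_disable_level++;
 *      (*_ISR_Vector_table[ vector ])( vector );
 *      --_ISR_Nest_level;
 *      --_Thread_Dispatch_disable_level;
 *      if ( both are zero &&
 *           (_Context_Switch_necessary || _ISR_Signals_to_thread_executing) )
 *        _Thread_Dispatch();
 *      restore registers and return via eret;
 */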
FRAME(_ISR_Handler,sp,0,ra)
.set noreorder
#if USE_IDTKIT
/* IDT/Kit incorrectly adds 4 to EPC before returning.  This compensates */
        lreg    k0, R_EPC*R_SZ(sp)
        daddiu    k0,k0,-4
        sreg    k0, R_EPC*R_SZ(sp)
        lreg    k0, R_CAUSE*R_SZ(sp)
        li      k1, ~CAUSE_BD
        and     k0, k1
        sreg    k0, R_CAUSE*R_SZ(sp)
#endif

/* save registers not already saved by IDT/sim */
        stackadd sp,sp,-EXCP_STACK_SIZE /* allocate exception stack frame */

        sreg    ra, R_RA*R_SZ(sp)
        sreg    v0, R_V0*R_SZ(sp)
        sreg    v1, R_V1*R_SZ(sp)
        sreg    a0, R_A0*R_SZ(sp)
        sreg    a1, R_A1*R_SZ(sp)
        sreg    a2, R_A2*R_SZ(sp)
        sreg    a3, R_A3*R_SZ(sp)
        sreg    t0, R_T0*R_SZ(sp)
        sreg    t1, R_T1*R_SZ(sp)
        sreg    t2, R_T2*R_SZ(sp)
        sreg    t3, R_T3*R_SZ(sp)
        sreg    t4, R_T4*R_SZ(sp)
        sreg    t5, R_T5*R_SZ(sp)
        sreg    t6, R_T6*R_SZ(sp)
        sreg    t7, R_T7*R_SZ(sp)
        mflo    k0
        sreg    t8, R_T8*R_SZ(sp)
        sreg    k0, R_MDLO*R_SZ(sp)
        sreg    t9, R_T9*R_SZ(sp)
        mfhi    k0
        sreg    gp, R_GP*R_SZ(sp)
        sreg    fp, R_FP*R_SZ(sp)
        sreg    k0, R_MDHI*R_SZ(sp)
        .set noat
        sreg    AT, R_AT*R_SZ(sp)
        .set at

        stackadd sp,sp,-40              /* allocate stack space and save ra */
        sd ra,32(sp)

/* determine if an interrupt generated this exception */
        mfc0 k0,C0_CAUSE
        and k1,k0,CAUSE_EXCMASK
        bnez k1,_ISR_Handler_prom_exit /* not an external interrupt, pass exception to Monitor */
        mfc0 k1,C0_SR
        and k0,k1
        and k0,CAUSE_IPMASK
        beq k0,zero,_ISR_Handler_quick_exit /* external interrupt not enabled, ignore */
        nop


  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */
#if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
        lint t0,_ISR_Nest_level
        beq t0, zero,  _ISR_Handler_1
        nop
        /* switch stacks */
_ISR_Handler_1:
#else
        lint t0,_ISR_Nest_level
#endif
  /*
   *  _ISR_Nest_level++;
   */
        addi t0,t0,1
        sint t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        lint t1,_Thread_Dispatch_disable_level
        addi t1,t1,1
        sint t1,_Thread_Dispatch_disable_level
#if 0
        nop
        j _ISR_Handler_4
        nop
  /*
   *  while ( interrupts_pending(cause_reg) ) {
   *     vector = BITFIELD_TO_INDEX(cause_reg);
   *     (*_ISR_Vector_table[ vector ])( vector );
   *  }
   */
_ISR_Handler_2:
/* software interrupt priorities can be applied here */
        li t1,-1
/* convert bit field into interrupt index */
_ISR_Handler_3:
        andi t2,t0,1
        addi t1,1
        beql t2,zero,_ISR_Handler_3
        dsrl t0,1
        li t1,7
        dsll t1,3                       /* convert index to byte offset (*8) */
        la t3,_ISR_Vector_table
        intadd t1,t3
        lint t1,(t1)
        jalr t1
        nop
        j _ISR_Handler_5
        nop
_ISR_Handler_4:
        mfc0 t0,C0_CAUSE
        andi t0,CAUSE_IPMASK
        bne t0,zero,_ISR_Handler_2
        dsrl t0,t0,8
_ISR_Handler_5:
#else
        nop
        li t1,7
        dsll t1,t1,SZ_INT_POW2
        la t3,_ISR_Vector_table
        intadd t1,t3
        lint t1,(t1)
        jalr t1
        nop
#endif
  /*
   *  --_ISR_Nest_level;
   */
        lint t2,_ISR_Nest_level
        addi t2,t2,-1
        sint t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        lint t1,_Thread_Dispatch_disable_level
        addi t1,t1,-1
        sint t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        nop
  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        lint t0,_Context_Switch_necessary
        lint t1,_ISR_Signals_to_thread_executing
        or t0,t0,t1
        beq t0,zero,_ISR_Handler_exit
        nop

  /*
   *  call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
   */
        jal _Thread_Dispatch
        nop
  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case)":
   *  prepare to get out of interrupt
   *  return from interrupt
   */
_ISR_Handler_exit:
        ld ra,32(sp)
        stackadd sp,sp,40

/* restore interrupt context from stack */
        lreg    k0, R_MDLO*R_SZ(sp)
        mtlo    k0
        lreg    k0, R_MDHI*R_SZ(sp)
        lreg    a2, R_A2*R_SZ(sp)
        mthi    k0
        lreg    a3, R_A3*R_SZ(sp)
        lreg    t0, R_T0*R_SZ(sp)
        lreg    t1, R_T1*R_SZ(sp)
        lreg    t2, R_T2*R_SZ(sp)
        lreg    t3, R_T3*R_SZ(sp)
        lreg    t4, R_T4*R_SZ(sp)
        lreg    t5, R_T5*R_SZ(sp)
        lreg    t6, R_T6*R_SZ(sp)
        lreg    t7, R_T7*R_SZ(sp)
        lreg    t8, R_T8*R_SZ(sp)
        lreg    t9, R_T9*R_SZ(sp)
        lreg    gp, R_GP*R_SZ(sp)
        lreg    fp, R_FP*R_SZ(sp)
        lreg    ra, R_RA*R_SZ(sp)
        lreg    a0, R_A0*R_SZ(sp)
        lreg    a1, R_A1*R_SZ(sp)
        lreg    v1, R_V1*R_SZ(sp)
        lreg    v0, R_V0*R_SZ(sp)
        .set noat
        lreg    AT, R_AT*R_SZ(sp)
        .set at

        stackadd sp,sp,EXCP_STACK_SIZE /* deallocate exception stack frame */

#if USE_IDTKIT
/* we handled exception, so return non-zero value */
        li v0,1
#endif

_ISR_Handler_quick_exit:
#ifdef USE_IDTKIT
        j ra
#else
        eret
#endif
        nop

_ISR_Handler_prom_exit:
#ifdef CPU_R3000
        la      k0, (R_VEC+((48)*8))
#endif

#ifdef CPU_R4000
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */
#endif
        j       k0
        nop

        .set    reorder

ENDFRAME(_ISR_Handler)


FRAME(mips_enable_interrupts,sp,0,ra)
        mfc0 t0,C0_SR           /* get status reg */
        nop
        or t0,t0,a0
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop
ENDFRAME(mips_enable_interrupts)

FRAME(mips_disable_interrupts,sp,0,ra)
        mfc0 v0,C0_SR           /* get status reg */
        li t1,SR_IMASK          /* t1 = load interrupt mask word */
        not t0,t1               /* t0 = ~t1 */
        and t0,v0               /* clear imask bits */
        mtc0 t0,C0_SR           /* save status reg */
        and v0,t1               /* mask return value (only return imask bits) */
        jr ra
        nop
ENDFRAME(mips_disable_interrupts)

FRAME(mips_enable_global_interrupts,sp,0,ra)
        mfc0 t0,C0_SR           /* get status reg */
        nop
        ori t0,SR_IE
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop
ENDFRAME(mips_enable_global_interrupts)

FRAME(mips_disable_global_interrupts,sp,0,ra)
        li t1,SR_IE
        mfc0 t0,C0_SR           /* get status reg */
        not t1
        and t0,t1
        mtc0 t0,C0_SR           /* save updated status reg */
        j ra
        nop
ENDFRAME(mips_disable_global_interrupts)

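/*
 *  Typical usage of the routines above from C (a sketch; the prototypes
 *  are assumptions based on the register usage in this file):
 */
#if 0
extern unsigned32 mips_disable_interrupts( void );
extern void       mips_enable_interrupts( unsigned32 mask );

void critical_section_example( void )
{
  unsigned32 mask = mips_disable_interrupts();  /* returns old IM bits */
  /* ... code that must not be interrupted ... */
  mips_enable_interrupts( mask );               /* restores old IM bits */
}
#endif
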
/* return the value of the status register in v0.  Used for debugging */
FRAME(mips_get_sr,sp,0,ra)
        mfc0 v0,C0_SR
        j ra
        nop
ENDFRAME(mips_get_sr)

FRAME(mips_break,sp,0,ra)
#if 1
        break 0x0
        j mips_break
#else
        j ra
#endif
        nop
ENDFRAME(mips_break)

/*PAGE
 *
 *  _CPU_Thread_Idle_body
 *
 *  NOTES:
 *
 *  1. This is the same as the regular CPU independent algorithm.
 *
 *  2. If you implement this using a "halt", "idle", or "shutdown"
 *     instruction, then don't forget to put it in an infinite loop.
 *
 *  3. Be warned. Some processors with onboard DMA have been known
 *     to stop the DMA if the CPU were put in IDLE mode.  This might
 *     also be a problem with other on-chip peripherals.  So use this
 *     hook with caution.
 */

FRAME(_CPU_Thread_Idle_body,sp,0,ra)
        wait                    /* enter low power mode */
        j _CPU_Thread_Idle_body
        nop
ENDFRAME(_CPU_Thread_Idle_body)

#define VEC_CODE_LENGTH 10*4

/**************************************************************************
**
**      init_exc_vecs() - moves the exception code into the addresses
**                        reserved for exception vectors
**
**      UTLB Miss exception vector at address 0x80000000
**
**      General exception vector at address 0x80000080
**
**      RESET exception vector is at address 0xbfc00000
**
***************************************************************************/

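/*
 *  In effect, for each vector the code below does the equivalent of the
 *  following C sketch (shown for the general exception vector only;
 *  clear_cache flushes the freshly copied code from the caches):
 *
 *      memcpy( (void *) E_VEC, exc_norm_code, VEC_CODE_LENGTH );
 *      clear_cache( E_VEC, VEC_CODE_LENGTH );
 */
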
#define INITEXCFRM ((2*4)+4)            /* ra + 2 arguments */
FRAME(init_exc_vecs,sp,0,ra)
/* This code yanked from SIM */
#if defined(CPU_R3000)
        .set    noreorder
        la      t1,exc_utlb_code
        la      t2,exc_norm_code
        li      t3,UT_VEC
        li      t4,E_VEC
        li      t5,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        lw      t7,0(t2)
        sw      t6,0(t3)
        sw      t7,0(t4)
        addiu   t1,4
        addiu   t3,4
        addiu   t4,4
        subu    t5,4
        bne     t5,zero,1b
        addiu   t2,4
        move    t5,ra           # assumes clear_cache doesn't use t5
        li      a0,UT_VEC
        jal     clear_cache
        li      a1,VEC_CODE_LENGTH
        nop
        li      a0,E_VEC
        jal     clear_cache
        li      a1,VEC_CODE_LENGTH
        move    ra,t5           # restore ra
        j       ra
        nop
        .set    reorder
#endif
#if defined(CPU_R4000)
        .set reorder
        move    t5,ra           # assumes clear_cache doesn't use t5

        /* TLB exception vector */
        la      t1,exc_tlb_code
        li      t2,T_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        li      a0,T_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        la      t1,exc_xtlb_code
        li      t2,X_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        /* extended TLB exception vector */
        li      a0,X_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        /* cache error exception vector */
        la      t1,exc_cache_code
        li      t2,C_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        li      a0,C_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        /* normal exception vector */
        la      t1,exc_norm_code
        li      t2,E_VEC |K1BASE
        li      t3,VEC_CODE_LENGTH
1:
        lw      t6,0(t1)
        addiu   t1,4
        subu    t3,4
        sw      t6,0(t2)
        addiu   t2,4
        bne     t3,zero,1b

        li      a0,E_VEC
        li      a1,VEC_CODE_LENGTH
        jal     clear_cache

        move    ra,t5           # restore ra
        j       ra
#endif
ENDFRAME(init_exc_vecs)


#if defined(CPU_R4000)
FRAME(exc_tlb_code,sp,0,ra)
#ifdef CPU_R3000
        la      k0, (R_VEC+((48)*8))
#endif

#ifdef CPU_R4000
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */
#endif
        j       k0
        nop

ENDFRAME(exc_tlb_code)


FRAME(exc_xtlb_code,sp,0,ra)
#ifdef CPU_R3000
        la      k0, (R_VEC+((48)*8))
#endif

#ifdef CPU_R4000
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */
#endif
        j       k0
        nop

ENDFRAME(exc_xtlb_code)


FRAME(exc_cache_code,sp,0,ra)
#ifdef CPU_R3000
        la      k0, (R_VEC+((48)*8))
#endif

#ifdef CPU_R4000
        la      k0, (R_VEC+((112)*8)) /* R4000 Sim's location is different */
#endif
        j       k0
        nop

ENDFRAME(exc_cache_code)


FRAME(exc_norm_code,sp,0,ra)
        la      k0, _ISR_Handler /* generic external int hndlr */
        j       k0
        nop
        subu    sp, EXCP_STACK_SIZE             /* set up local stack frame */
ENDFRAME(exc_norm_code)
#endif

/**************************************************************************
**
** enable_int(mask) - enables interrupts - mask is positioned so it only
**                      needs to be or'ed into the status reg.  Note that
**                      this routine also or's a 1 into bit 0 (the IE bit),
**                      so use caution when invoking it in the middle of a
**                      debugging session where the client may have nested
**                      interrupts.
**
****************************************************************************/
FRAME(enable_int,sp,0,ra)
        .set    noreorder
        mfc0    t0,C0_SR
        or      a0,1
        or      t0,a0
        mtc0    t0,C0_SR
        j       ra
        nop
        .set    reorder
ENDFRAME(enable_int)


/***************************************************************************
**
**      disable_int(mask) - disables an interrupt - mask is the complement
**                          of the bits to be cleared - i.e. to clear ext int
**                          5 the mask would be 0xffff7fff
**
****************************************************************************/
FRAME(disable_int,sp,0,ra)
        .set    noreorder
        mfc0    t0,C0_SR
        nop
        and     t0,a0
        mtc0    t0,C0_SR
        j       ra
        nop
ENDFRAME(disable_int)

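/*
 *  Example usage from C (a sketch; per the comment above, masking
 *  external interrupt 5 means clearing bit 15 of the status register):
 */
#if 0
void mask_example( void )        /* hypothetical */
{
  disable_int( 0xffff7fff );     /* clear SR bit 15: mask ext int 5 */
  enable_int( 0x8000 );          /* or bit 15 (and IE) back in */
}
#endif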