source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ 0b2bcb1

Last change: 0b2bcb1, checked in by Greg Menke <gregory.menke@…> on 01/03/05 at 17:41:22

PR 737

/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appear in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission.  Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and adding the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov> bench tested ISR
 *          performance, tweaking this code and the ISR vectoring routines
 *          to reduce overhead and latencies.  Added optional
 *          instrumentation as well.
 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov> overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs. integer-only tasks, interrupt
 *          levels and deferred FP contexts.
 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
 *          by increasing the amount of context saved/restored.
 *    2004: March 24, Art Ferrer, NASA/GSFC, added save of FP status/control
 *          register to fix an intermittent FP error encountered on the ST5
 *          mission implementation on the Mongoose V processor.
 *    2004: April 7, Greg Menke <gregory.menke@gsfc.nasa.gov> added __mips==32
 *          support for R4000 processors running 32 bit code.  Fixed #define
 *          problems that caused FPU code to always be included even when no
 *          FPU is present.
 *
 *  COPYRIGHT (c) 1989-2002.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#include <rtems/asm.h>
#include <rtems/mips/iregdef.h>
#include <rtems/mips/idtcpu.h>

#define ASSEMBLY_ONLY
#include <rtems/score/cpu.h>

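/* Sanity checks: TRUE must evaluate as true and FALSE as false, or the
 * conditional assembly throughout this file would silently misbuild, so
 * fail the build here instead.
 */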
#if TRUE
#else
#error TRUE is not true
#endif
#if FALSE
#error FALSE is not false
#else
#endif

/*
#if ( CPU_HARDWARE_FP == TRUE )
#warning CPU_HARDWARE_FP == TRUE
#else
#warning CPU_HARDWARE_FP != TRUE
#endif
*/


/* Enable debugging shadow writes to misc RAM.  This is a vestigial
 * Mongoose-ism debug tool, but it may be handy in the future so we
 * left it in...
 */

/* #define INSTRUMENT_ISR_VECTORING */
/* #define INSTRUMENT_EXECUTING_THREAD */


/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 (R4xxx)
 *  and MIPS ISA Level 1 (R3xxx).
 */
#if __mips == 3
/* 64 bit register operations */
#define NOP     nop
/*
#define ADD     dadd
#define MFCO    dmfc0
#define MTCO    dmtc0
*/
#define ADD     add
#define MFCO    mfc0
#define MTCO    mtc0
#define STREG   sd
#define LDREG   ld
#define ADDU    addu
#define ADDIU   addiu
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif (__mips == 1) || (__mips == 32)
/* 32 bit register operations */
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif
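
/* For example, STREG s0,S0_OFFSET*R_SZ(a0) assembles to "sw s0,0(a0)"
 * on a 32 bit build (__mips == 1 or 32) and to "sd s0,0(a0)" on a
 * 64 bit build (__mips == 3), so the context save/restore sequences
 * below serve both register widths from one set of sources.
 */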


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12
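
/* A sketch of the structure these offsets assume (the authoritative
 * definition lives in cpu.h; field names here are illustrative):
 *
 *   typedef struct {
 *       reg s0, s1, s2, s3, s4, s5, s6, s7;  // callee-saved registers
 *       reg sp, fp, ra;                      // stack/frame ptrs, return address
 *       reg c0_sr, c0_epc;                   // CP0 status and exception PC
 *   } Context_Control;
 *
 * where "reg" is an R_SZ-byte integer register slot.
 */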

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31
#define FPCS_OFFSET 32
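
/* Context_Control_fp is thus laid out as the 32 FP data registers
 * followed by one slot for the FP status/control word (FPCS), each
 * slot F_SZ bytes wide, 33 slots in total.
 */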


ASM_EXTERN(__exceptionStackFrame, SZ_INT)


/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed, then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noreorder
        .set noat

        /*
        ** Make sure the FPU is on before we save state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with an FP task switching in.
        */
        MFC0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if __mips == 3
        li      t2,SR_EXL | SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        MTC0    t0,C0_SR

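        /*
        ** a0 holds the fp_context_ptr argument.  Dereference it into a1
        ** for the bulk save routine below, and park the return address
        ** in t0, since the jal overwrites ra and this routine never
        ** builds a stack frame.
        */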
        ld      a1,(a0)
        move    t0,ra
        jal     _CPU_Context_save_fp_from_exception
        NOP

        /*
        ** Reassert the task's state because we've not saved it yet.
        */
        MTC0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        swc1 $f0,FP0_OFFSET*F_SZ(a1)
        swc1 $f1,FP1_OFFSET*F_SZ(a1)
        swc1 $f2,FP2_OFFSET*F_SZ(a1)
        swc1 $f3,FP3_OFFSET*F_SZ(a1)
        swc1 $f4,FP4_OFFSET*F_SZ(a1)
        swc1 $f5,FP5_OFFSET*F_SZ(a1)
        swc1 $f6,FP6_OFFSET*F_SZ(a1)
        swc1 $f7,FP7_OFFSET*F_SZ(a1)
        swc1 $f8,FP8_OFFSET*F_SZ(a1)
        swc1 $f9,FP9_OFFSET*F_SZ(a1)
        swc1 $f10,FP10_OFFSET*F_SZ(a1)
        swc1 $f11,FP11_OFFSET*F_SZ(a1)
        swc1 $f12,FP12_OFFSET*F_SZ(a1)
        swc1 $f13,FP13_OFFSET*F_SZ(a1)
        swc1 $f14,FP14_OFFSET*F_SZ(a1)
        swc1 $f15,FP15_OFFSET*F_SZ(a1)
        swc1 $f16,FP16_OFFSET*F_SZ(a1)
        swc1 $f17,FP17_OFFSET*F_SZ(a1)
        swc1 $f18,FP18_OFFSET*F_SZ(a1)
        swc1 $f19,FP19_OFFSET*F_SZ(a1)
        swc1 $f20,FP20_OFFSET*F_SZ(a1)
        swc1 $f21,FP21_OFFSET*F_SZ(a1)
        swc1 $f22,FP22_OFFSET*F_SZ(a1)
        swc1 $f23,FP23_OFFSET*F_SZ(a1)
        swc1 $f24,FP24_OFFSET*F_SZ(a1)
        swc1 $f25,FP25_OFFSET*F_SZ(a1)
        swc1 $f26,FP26_OFFSET*F_SZ(a1)
        swc1 $f27,FP27_OFFSET*F_SZ(a1)
        swc1 $f28,FP28_OFFSET*F_SZ(a1)
        swc1 $f29,FP29_OFFSET*F_SZ(a1)
        swc1 $f30,FP30_OFFSET*F_SZ(a1)
        swc1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                    /* Read FP status/control reg */
        cfc1 a0,$31                    /* Two reads clear the FP pipeline */
        NOP
        NOP
        sw a0, FPCS_OFFSET*F_SZ(a1)    /* Store value to FPCS location */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed, then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        .set noreorder

        /*
        ** Make sure the FPU is on before we retrieve state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with an FP task switching in.
        */
        MFC0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if __mips == 3
        li      t2,SR_EXL | SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        MTC0    t0,C0_SR

        ld      a1,(a0)
        move    t0,ra
        jal     _CPU_Context_restore_fp_from_exception
        NOP

        /*
        ** Reassert the old task's state because we've not restored the
        ** new one yet.
        */
        MTC0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        lwc1 $f0,FP0_OFFSET*F_SZ(a1)
        lwc1 $f1,FP1_OFFSET*F_SZ(a1)
        lwc1 $f2,FP2_OFFSET*F_SZ(a1)
        lwc1 $f3,FP3_OFFSET*F_SZ(a1)
        lwc1 $f4,FP4_OFFSET*F_SZ(a1)
        lwc1 $f5,FP5_OFFSET*F_SZ(a1)
        lwc1 $f6,FP6_OFFSET*F_SZ(a1)
        lwc1 $f7,FP7_OFFSET*F_SZ(a1)
        lwc1 $f8,FP8_OFFSET*F_SZ(a1)
        lwc1 $f9,FP9_OFFSET*F_SZ(a1)
        lwc1 $f10,FP10_OFFSET*F_SZ(a1)
        lwc1 $f11,FP11_OFFSET*F_SZ(a1)
        lwc1 $f12,FP12_OFFSET*F_SZ(a1)
        lwc1 $f13,FP13_OFFSET*F_SZ(a1)
        lwc1 $f14,FP14_OFFSET*F_SZ(a1)
        lwc1 $f15,FP15_OFFSET*F_SZ(a1)
        lwc1 $f16,FP16_OFFSET*F_SZ(a1)
        lwc1 $f17,FP17_OFFSET*F_SZ(a1)
        lwc1 $f18,FP18_OFFSET*F_SZ(a1)
        lwc1 $f19,FP19_OFFSET*F_SZ(a1)
        lwc1 $f20,FP20_OFFSET*F_SZ(a1)
        lwc1 $f21,FP21_OFFSET*F_SZ(a1)
        lwc1 $f22,FP22_OFFSET*F_SZ(a1)
        lwc1 $f23,FP23_OFFSET*F_SZ(a1)
        lwc1 $f24,FP24_OFFSET*F_SZ(a1)
        lwc1 $f25,FP25_OFFSET*F_SZ(a1)
        lwc1 $f26,FP26_OFFSET*F_SZ(a1)
        lwc1 $f27,FP27_OFFSET*F_SZ(a1)
        lwc1 $f28,FP28_OFFSET*F_SZ(a1)
        lwc1 $f29,FP29_OFFSET*F_SZ(a1)
        lwc1 $f30,FP30_OFFSET*F_SZ(a1)
        lwc1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                  /* Read from FP status/control reg */
        cfc1 a0,$31                  /* Two reads clear the FP pipeline */
        NOP                          /* NOPs ensure execution */
        NOP
        lw a0,FPCS_OFFSET*F_SZ(a1)   /* Load saved FPCS value */
        NOP
        ctc1 a0,$31                  /* Restore FPCS register */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */

FRAME(_CPU_Context_switch,sp,0,ra)
        .set noreorder

        MFC0    t0,C0_SR
#if (__mips == 3) || (__mips == 32)
        li      t1,SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        STREG   t0,C0_SR_OFFSET*R_SZ(a0)        /* save the task's SR */
        not     t1
        and     t0,t1                           /* mask off interrupts while we context switch */
        MTC0    t0,C0_SR
        NOP

        STREG ra,RA_OFFSET*R_SZ(a0)             /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)


        /*
        ** This code grabs the userspace EPC if we're dispatching from
        ** an interrupt frame, or supplies the address of the dispatch
        ** routine if not.  This is entirely for the gdb stub's benefit so
        ** it can know where each task is running.
        **
        ** Its value is only set when calling _Thread_Dispatch from
        ** the interrupt handler, and is cleared immediately when this
        ** routine picks it up.
        */

448       
449        la      t0,__exceptionStackFrame        /* see if we're coming in from an exception */
450        LDREG   t1, (t0)
451        NOP
452        beqz    t1,1f
453
454        STREG   zero, (t0)                      /* and clear it */
455        NOP
456        LDREG   t0,R_EPC*R_SZ(t1)               /* get the userspace EPC from the frame */
457        b       2f
458        nop
459               
4601:      la    t0,_Thread_Dispatch               /* if ==0, we're switched out */
461
4622:      STREG   t0,C0_EPC_OFFSET*R_SZ(a0)
463       

_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)             /* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

/*      NOP */
/*#if __mips == 3 */
/*        andi  t0,SR_EXL */
/*        bnez  t0,_CPU_Context_1 */   /* set exception level from restore context */
/*        li    t0,~SR_EXL */
/*        MFC0  t1,C0_SR */
/*        NOP */
/*        and   t1,t0 */
/*        MTC0  t1,C0_SR */
/* */
/*#elif __mips == 1 */
/* */
/*        andi  t0,(SR_INTERRUPT_ENABLE_BITS) */ /* we know 0 disabled */
/*        beq   t0,$0,_CPU_Context_1  */         /* set level from restore context */
/*        MFC0  t0,C0_SR */
/*        NOP */
/*        or    t0,(SR_INTERRUPT_ENABLE_BITS) */ /* new_sr = old sr with enabled  */
/*        MTC0  t0,C0_SR */                     /* set with enabled */
/*        NOP */


/*
** Incorporate the incoming task's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump thru the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the task's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a task
** must itself manipulate the SR register.
**
** Although something of a hack on this processor, we treat the SR register
** int enables as the RTEMS interrupt level.  We use the int level
** value as a bitmask, not as any sort of greater than/less than metric.
** Manipulation of a task's interrupt level corresponds directly to manipulation
** of that task's SR bits, as seen in cpu.c.
**
** Note, interrupts are disabled before context is saved, though the task's
** interrupt enable state is recorded.  The task swapping in will apply its
** specific SR bits, including interrupt enable.  If further task-specific
** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
** cpu.h task initialization code that will be affected.
*/

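/* In other words (illustrative, per the code below): with the per-task
** mask  PERTASK = SR_CU1 | SR_IMASK | <int enable bits for this ISA>,
** the new status register is computed as
**
**      new_SR = (current_SR & ~PERTASK) | (task_SR & PERTASK)
**
** so only the FPU enable, interrupt mask and interrupt enable bits
** follow the task; every other SR bit stays global.
*/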
        li      t2,SR_CU1
        or      t2,SR_IMASK

        /* int enable bits */
#if (__mips == 3) || (__mips == 32)
        /*
        ** Save IE
        */
        or      t2, SR_IE
#elif __mips == 1
        /*
        ** Save current, previous & old int enables.  This is key because
        ** we can dispatch from within the stack frame used by an
        ** interrupt service.  The int enables nest, but not beyond
        ** previous and old because of the dispatch interlock seen
        ** in the interrupt processing code.
        */
        or      t2,SR_IEC + SR_IEP + SR_IEO
#endif
        and     t0,t2           /* keep only the per-task bits */

        MFC0    t1,C0_SR        /* grab the current SR */
        not     t2
        and     t1,t2           /* mask off the old task's per-task bits */
        or      t1,t0           /* or in the new task's bits */
        MTC0    t1,C0_SR        /* and load the new SR */
        NOP

/* _CPU_Context_1: */
        j       ra
        NOP
ENDFRAME(_CPU_Context_switch)


/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        .set noreorder
        move    a1,a0
        j       _CPU_Context_switch_restore
        NOP

ENDFRAME(_CPU_Context_restore)


ASM_EXTERN(_ISR_Nest_level, SZ_INT)
ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
ASM_EXTERN(_Thread_Executing,SZ_INT)

.extern _Thread_Dispatch
.extern _ISR_Vector_table


/*  void _DBG_Handler()
 *
 *  This routine services the (at least) MIPS1 debug vector, which is
 *  used only by the hardware debugging features.  This code, while
 *  optional, is best located here because it is intrinsically
 *  associated with exceptions in general and thus tied pretty
 *  closely to _ISR_Handler.
 */

FRAME(_DBG_Handler,sp,0,ra)
        .set noreorder
        la      k0,_ISR_Handler
        j       k0
        NOP
        .set reorder
ENDFRAME(_DBG_Handler)



/*  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler, while the requirements for maintaining the RTEMS
 *  state variables (_ISR_Nest_level and _Thread_Dispatch_disable_level,
 *  updated below) constrain the rest.
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branch to _ISR_Handler.
 */

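/* Note on the frame allocated below: EXCP_STACK_SIZE is NREGS*R_SZ, an
** exception frame laid out by the R_xx offsets in <rtems/mips/iregdef.h>.
** _ISR_Handler stores each register at R_xx*R_SZ(sp) within that frame,
** and the same frame is what mips_vector_exceptions and
** mips_vector_isr_handlers receive as their argument.
*/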
FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU    sp,sp,-EXCP_STACK_SIZE

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
        mflo  t0
        STREG t8, R_T8*R_SZ(sp)
        STREG t0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  t0
        STREG gp, R_GP*R_SZ(sp)
        STREG t0, R_MDHI*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)

        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        MFC0     t0,C0_SR
        MFC0     t1,C0_EPC
        STREG    t0,R_SR*R_SZ(sp)
        STREG    t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw t2, _Thread_Executing
        NOP
        sw t2, 0x8001FFF0
#endif

        /* determine if an interrupt generated this exception */

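        /*
        ** Cause.ExcCode (isolated by CAUSE_EXCMASK) is zero for an
        ** external interrupt; any nonzero code means a genuine
        ** exception and we fall through to _ISR_Handler_Exception.
        */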
        MFC0     t0,C0_CAUSE
        NOP

        and      t1,t0,CAUSE_EXCMASK
        beq      t1, 0, _ISR_Handler_1

_ISR_Handler_Exception:

        /*  If we return from the exception, it is assumed nothing
         *  bad is going on and we can continue to run normally.
         *  But we want to save the entire CPU context so exception
         *  handlers can look at it and change it.
         *
         *  NOTE: This is the path the debugger stub will take.
         */

        /* already got t0 = cause in the interrupt test above */
        STREG    t0,R_CAUSE*R_SZ(sp)

        STREG    sp, R_SP*R_SZ(sp)

        STREG    s0,R_S0*R_SZ(sp)     /* save s0 - s7 */
        STREG    s1,R_S1*R_SZ(sp)
        STREG    s2,R_S2*R_SZ(sp)
        STREG    s3,R_S3*R_SZ(sp)
        STREG    s4,R_S4*R_SZ(sp)
        STREG    s5,R_S5*R_SZ(sp)
        STREG    s6,R_S6*R_SZ(sp)
        STREG    s7,R_S7*R_SZ(sp)

        /* CP0 special registers */

#if __mips == 1
        MFC0     t0,C0_TAR
#endif
        MFC0     t1,C0_BADVADDR

#if __mips == 1
        STREG    t0,R_TAR*R_SZ(sp)
#else
        NOP
#endif
        STREG    t1,R_BADVADDR*R_SZ(sp)

#if ( CPU_HARDWARE_FP == TRUE )
        MFC0     t0,C0_SR                 /* we have an FPU, save state if enabled */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 1f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_save_fp_from_exception
        NOP
        MFC1     t0,C1_REVISION
        MFC1     t1,C1_STATUS
        STREG    t0,R_FEIR*R_SZ(sp)
        STREG    t1,R_FCSR*R_SZ(sp)

1:
#endif

        move     a0,sp
        jal      mips_vector_exceptions
        NOP


        /*
        ** Note, if the exception vector returns, rely on it to have
        ** adjusted EPC so we will return to some correct address.  If
        ** this is not done, we might get stuck in an infinite loop because
        ** we'll return to the instruction where the exception occurred and
        ** it could throw again.
        **
        ** It is expected the only code using the exception processing is
        ** either the gdb stub or some user code which is either going to
        ** panic or do something useful.  Regardless, it is up to each
        ** exception routine to properly adjust EPC, so the code below
        ** may be helpful for doing just that.
        */


/* *********************************************************************
** this code follows the R3000's exception return logic, but is not
** needed because the gdb stub does it for us.  It might be useful
** for something else at some point...
**
        * compute the address of the instruction we'll return to *

        LDREG   t1, R_CAUSE*R_SZ(sp)
        LDREG   t0, R_EPC*R_SZ(sp)

        * first see if the exception happened in the delay slot *
        li      t3,CAUSE_BD
        AND     t4,t1,t3
        beqz    t4,excnodelay
        NOP

        * it did, now see if the branch occurred or not *
        li      t3,CAUSE_BT
        AND     t4,t1,t3
        beqz    t4,excnobranch
        NOP

        * branch was taken, we resume at the branch target *
        LDREG   t0, R_TAR*R_SZ(sp)
        j       excreturn
        NOP

excnobranch:
        ADDU    t0,R_SZ

excnodelay:
        ADDU    t0,R_SZ

excreturn:
        STREG   t0, R_EPC*R_SZ(sp)
        NOP
********************************************************************* */

        /* if we're returning into mips_break, move to the next instruction */

        LDREG   t0,R_EPC*R_SZ(sp)
        la      t1,mips_break
        xor     t2,t0,t1
        bnez    t2,3f

        addu    t0,R_SZ
        STREG   t0,R_EPC*R_SZ(sp)
        NOP
3:

#if ( CPU_HARDWARE_FP == TRUE )
        MFC0     t0,C0_SR               /* FPU is present, restore state if enabled */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 2f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_restore_fp_from_exception
        NOP
        LDREG    t0,R_FEIR*R_SZ(sp)
        LDREG    t1,R_FCSR*R_SZ(sp)
        MTC1     t0,C1_REVISION
        MTC1     t1,C1_STATUS
2:
#endif
        LDREG    s0,R_S0*R_SZ(sp)    /* restore s0 - s7 */
        LDREG    s1,R_S1*R_SZ(sp)
        LDREG    s2,R_S2*R_SZ(sp)
        LDREG    s3,R_S3*R_SZ(sp)
        LDREG    s4,R_S4*R_SZ(sp)
        LDREG    s5,R_S5*R_SZ(sp)
        LDREG    s6,R_S6*R_SZ(sp)
        LDREG    s7,R_S7*R_SZ(sp)

        /* do NOT restore the sp as this could mess up the world */
        /* do NOT restore the cause as this could mess up the world */

        /*
        ** Jump all the way out.  If there's a pending interrupt, just
        ** let it be serviced later.  Since we're probably using the
        ** gdb stub, we've already disrupted the ISR service timing
        ** anyhow.  We oughtn't mix exception and interrupt processing
        ** in the same exception call in case the exception stuff
        ** might interfere with the dispatching & timer ticks.
        */
        j        _ISR_Handler_exit
        NOP

_ISR_Handler_1:

        MFC0     t1,C0_SR
        and      t0,CAUSE_IPMASK
        and      t0,t1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* then where did it come from??? */

        beq      t0,zero,_ISR_Handler_exit
        nop


  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */

  /*
   *  _ISR_Nest_level++;
   */
        LDREG  t0,_ISR_Nest_level
        NOP
        ADD    t0,t0,1
        STREG  t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,1
        STREG  t1,_Thread_Dispatch_disable_level

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

#ifdef INSTRUMENT_ISR_VECTORING
        NOP
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move     a0,sp
        jal      mips_vector_isr_handlers
        NOP

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        NOP
#endif

  /*
   *  --_ISR_Nest_level;
   */
        LDREG  t2,_ISR_Nest_level
        NOP
        ADD    t2,t2,-1
        STREG  t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,-1
        STREG  t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        NOP


  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        LDREG t0,_Context_Switch_necessary
        LDREG t1,_ISR_Signals_to_thread_executing
        NOP
        or    t0,t0,t1
        beq   t0,zero,_ISR_Handler_exit
        NOP


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering _Thread_Dispatch, which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the _Thread_Dispatch_disable_level interlock
** that prevents recursive entry into _Thread_Dispatch.
*/

        MFC0    t0, C0_SR
#if __mips == 1

        li      t1,SR_IEC
        or      t0, t1

#elif (__mips == 3) || (__mips == 32)

        /*
        ** clear EXL and set IE so we can get interrupts.
        */
        li      t1, SR_EXL
        not     t1
        and     t0,t1
        or      t0, SR_IE

#endif
        MTC0    t0, C0_SR
        NOP

        /* save off our stack frame so the context switcher can get to it */
        la      t0,__exceptionStackFrame
        STREG   sp,(t0)

        jal     _Thread_Dispatch
        NOP

        /*
        ** And make sure it's clear in case we didn't dispatch.  If we
        ** did, it's already cleared.
        */
        la      t0,__exceptionStackFrame
        STREG   zero,(t0)
        NOP

/*
** Turn interrupts back off while we restore context so
** a badly timed interrupt won't mess things up.
*/
        MFC0    t0, C0_SR

#if __mips == 1

        /* ints off, current & prev kernel mode on (kernel mode enabled is bit clear..argh!) */
        li      t1,SR_IEC | SR_KUP | SR_KUC
        not     t1
        and     t0, t1
        MTC0    t0, C0_SR
        NOP

#elif (__mips == 3) || (__mips == 32)

        move    t2, t0

        /* make sure EXL & IE are clear so ints are disabled & we can update EPC for the return */
        li   t1,SR_EXL | SR_IE
        not  t1
        and  t0,t1
        MTC0 t0,C0_SR
        NOP

        /* store new EPC value, which we can do since EXL=0 */
        LDREG   t0, R_EPC*R_SZ(sp)
        NOP
        MTC0    t0, C0_EPC
        NOP

        /* apply task's SR with EXL set so the eret will return properly */
        or      t2, SR_EXL
        MTC0    t2, C0_SR
        NOP
#endif


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF8
#endif


  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register.  _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changed, so all we need to do is return.
*/
        /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0, 0x8001FFFC
#endif

        LDREG t8, R_MDLO*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        mtlo  t8
        LDREG t8, R_MDHI*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        mthi  t8
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)

#if __mips == 1
        LDREG     k1, R_EPC*R_SZ(sp)
#endif

        .set noat
        LDREG     AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE

#if (__mips == 3) || (__mips == 32)
        eret
#elif __mips == 1
        j         k1
        rfe
#endif
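        /*
        ** On MIPS1, the rfe above executes in the delay slot of "j k1":
        ** control transfers to the saved EPC (loaded into k1 above) while
        ** rfe pops the interrupt-enable/kernel-mode bits back to their
        ** pre-exception state.  On MIPS3/32 the eret does both jobs at
        ** once, using the EPC and SR values set up just before this point.
        */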
        NOP

        .set    reorder
ENDFRAME(_ISR_Handler)


FRAME(mips_break,sp,0,ra)
        .set noreorder
        break   0x0     /* this instruction must be first in this function; mips-stub.c assumes so */
        NOP
        j       ra
        NOP
        .set    reorder
ENDFRAME(mips_break)