source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ febaa8a

/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and added the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
 *          performance, tweaking this code and the isr vectoring routines
 *          to reduce overhead & latencies.  Added optional
 *          instrumentation as well.
 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
 *          and deferred FP contexts.
 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
 *          by increasing the amount of context saved/restored.
 *    2004: 24March, Art Ferrer, NASA/GSFC, added save of FP status/control
 *          register to fix intermittent FP error encountered on ST5 mission
 *          implementation on Mongoose V processor.
 *    2004: April 7, Greg Menke <gregory.menke@gsfc.nasa.gov> Added __mips==32
 *          support for R4000 processors running 32 bit code.  Fixed #define
 *          problems that caused fpu code to always be included even when no
 *          fpu is present.
 *
 *  COPYRIGHT (c) 1989-2002.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>
#include <rtems/mips/iregdef.h>
#include <rtems/mips/idtcpu.h>

#define ASSEMBLY_ONLY
#include <rtems/score/cpu.h>

#if TRUE
#else
#error TRUE is not true
#endif
#if FALSE
#error FALSE is not false
#else
#endif

/*
#if ( CPU_HARDWARE_FP == TRUE )
#warning CPU_HARDWARE_FP == TRUE
#else
#warning CPU_HARDWARE_FP != TRUE
#endif
*/


/* enable debugging shadow writes to misc ram, this is a vestigial
* Mongoose-ism debug tool, but may be handy in the future so we
* left it in...
*/

/* #define INSTRUMENT_ISR_VECTORING */
/* #define INSTRUMENT_EXECUTING_THREAD */


/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP     nop
#define ADD     dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0           /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
#define MTCO    dmtc0           /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
#define ADDU    addu
#define ADDIU   addiu
#if (__mips_fpr==32)
#define STREGC1 swc1
#define LDREGC1 lwc1
#elif (__mips_fpr==64)          /* Use these instructions if there are 64 bit floating point registers. This requires FR bit to be set in C0_SR */
#define STREGC1 sdc1
#define LDREGC1 ldc1
#endif
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif (__mips == 1 ) || (__mips == 32)
/* 32 bit register operations*/
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define STREGC1 swc1
#define LDREGC1 lwc1
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif
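
/* R_SZ and F_SZ scale the per-register slot indices defined below into
** byte offsets, so slot n of a context lives at base + n*R_SZ.  SZ_INT_POW2
** is log2(SZ_INT), allowing shift-based scaling where needed:
** (n << SZ_INT_POW2) == n * SZ_INT.
*/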


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)
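/* NREGS and the R_* frame indices used throughout come from
** <rtems/mips/iregdef.h>, so EXCP_STACK_SIZE is the byte size of one
** full exception register frame.
*/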


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12
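
/* Viewed from C, the layout these offsets assume is simply an array of
** thirteen R_SZ-wide slots (a sketch for orientation; cpu.h is
** authoritative):
**
**   s0 s1 s2 s3 s4 s5 s6 s7 sp fp ra c0_sr c0_epc
**
** so, e.g., RA_OFFSET*R_SZ(a0) addresses the saved return address.
*/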

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31
#define FPCS_OFFSET 32
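
/* Each FPn_OFFSET indexes an F_SZ-wide slot, so register $fn is stored at
** FPn_OFFSET*F_SZ(a1); the extra FPCS slot holds the FP control/status
** register captured below via "cfc1 reg,$31".
*/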

ASM_EXTERN(__exceptionStackFrame, SZ_INT)


/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */
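
/* Illustrative reading of the convention used here: the routine receives
** a (void **), so the first thing the code below does is dereference it
** ("lw a1,(a0)") to get the address of the FP context storage area.
*/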

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noreorder
        .set noat

        /*
        ** Make sure the FPU is on before we save state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        mfc0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
        li      t2,SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        mtc0    t0,C0_SR

        lw      a1,(a0)         /* get address of context storage area */
        move    t0,ra
        jal     _CPU_Context_save_fp_from_exception
        NOP

        /*
        ** Reassert the task's state because we've not saved it yet.
        */
        mtc0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        STREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        STREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        STREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        STREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        STREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        STREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        STREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        STREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        STREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        STREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        STREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        STREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        STREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        STREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        STREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        STREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        STREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        STREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        STREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        STREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        STREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        STREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        STREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        STREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        STREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        STREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        STREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        STREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        STREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        STREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        STREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        STREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                    /* Read FP status/control reg */
        cfc1 a0,$31                    /* Two reads clear pipeline */
        NOP
        NOP
        sw a0, FPCS_OFFSET*F_SZ(a1)    /* Store value to FPCS location */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        .set noreorder

        /*
        ** Make sure the FPU is on before we retrieve state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        mfc0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
        li      t2,SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        mtc0    t0,C0_SR

        lw      a1,(a0)         /* get address of context storage area */
        move    t0,ra
        jal     _CPU_Context_restore_fp_from_exception
        NOP

        /*
        ** Reassert the old task's state because we've not restored the
        ** new one yet.
        */
        mtc0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        LDREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        LDREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        LDREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        LDREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        LDREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        LDREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        LDREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        LDREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        LDREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        LDREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        LDREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        LDREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        LDREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        LDREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        LDREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        LDREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        LDREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        LDREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        LDREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        LDREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        LDREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        LDREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        LDREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        LDREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        LDREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        LDREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        LDREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        LDREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        LDREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        LDREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        LDREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        LDREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                  /* Read from FP status/control reg */
        cfc1 a0,$31                  /* Two reads clear pipeline */
        NOP                          /* NOPs ensure execution */
        NOP
        lw a0,FPCS_OFFSET*F_SZ(a1)   /* Load saved FPCS value */
        NOP
        ctc1 a0,$31                  /* Restore FPCS register */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */

FRAME(_CPU_Context_switch,sp,0,ra)
        .set noreorder

        mfc0    t0,C0_SR
#if (__mips == 3) || (__mips == 32)
        li      t1,SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        STREG   t0,C0_SR_OFFSET*R_SZ(a0)        /* save the task's SR */
        not     t1
        and     t0,t1                           /* mask off interrupts while we context switch */
        mtc0    t0,C0_SR
        NOP

        STREG ra,RA_OFFSET*R_SZ(a0)             /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)


        /*
        ** this code grabs the userspace EPC if we're dispatching from
        ** an interrupt frame or supplies the address of the dispatch
        ** routines if not.  This is entirely for the gdbstub's benefit so
        ** it can know where each task is running.
        **
        ** Its value is only set when calling _Thread_Dispatch from
        ** the interrupt handler and is cleared immediately when this
        ** routine gets it.
        */
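        /*
        ** Handshake, as seen below and in _ISR_Handler: the interrupt
        ** exit path stores sp into __exceptionStackFrame just before
        ** calling _Thread_Dispatch, so a nonzero value here always
        ** denotes a live interrupt frame; this code consumes and
        ** clears it.
        */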

        la      t0,__exceptionStackFrame        /* see if we're coming in from an exception */
        LDREG   t1, (t0)
        NOP
        beqz    t1,1f

        STREG   zero, (t0)                      /* and clear it */
        NOP
        LDREG   t0,R_EPC*R_SZ(t1)               /* get the userspace EPC from the frame */
        b       2f
        NOP

1:      la      t0,_Thread_Dispatch             /* if ==0, we're switched out */

2:      STREG   t0,C0_EPC_OFFSET*R_SZ(a0)


_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)             /* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

/*      NOP */
/*#if (__mips == 3) || (__mips == 32) */
/*        andi  t0,SR_EXL */
/*        bnez  t0,_CPU_Context_1 */   /* set exception level from restore context */
/*        li    t0,~SR_EXL */
/*        MFC0  t1,C0_SR */
/*        NOP */
/*        and   t1,t0 */
/*        MTC0  t1,C0_SR */
/* */
/*#elif __mips == 1 */
/* */
/*        andi  t0,(SR_INTERRUPT_ENABLE_BITS) */ /* we know 0 disabled */
/*        beq   t0,$0,_CPU_Context_1  */         /* set level from restore context */
/*        MFC0  t0,C0_SR */
/*        NOP */
/*        or    t0,(SR_INTERRUPT_ENABLE_BITS) */ /* new_sr = old sr with enabled  */
/*        MTC0  t0,C0_SR */                     /* set with enabled */
/*        NOP */


/*
** Incorporate the incoming task's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump through the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the task's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a task
** must itself manipulate the SR register.
**
** Although something of a hack on this processor, we treat the SR register
** int enables as the RTEMS interrupt level.  We use the int level
** value as a bitmask, not as any sort of greater than/less than metric.
** Manipulation of a task's interrupt level corresponds directly to manipulation
** of that task's SR bits, as seen in cpu.c
**
** Note, interrupts are disabled before context is saved, though the task's
** interrupt enable state is recorded.  The task swapping in will apply its
** specific SR bits, including interrupt enable.  If further task-specific
** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
** cpu.h task initialization code that will be affected.
*/
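
/*
** The net effect of the code below, written out:
**
**   per_task = SR_CU1 | SR_IMASK | (int enable bits)
**   new_SR   = (current_SR & ~per_task) | (heir_SR & per_task)
**
** i.e. the heir's FPU enable and interrupt mask/enable bits replace the
** outgoing values while every other SR bit keeps its global value.
*/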

        li      t2,SR_CU1
        or      t2,SR_IMASK

        /* int enable bits */
#if (__mips == 3) || (__mips == 32)
        /*
        ** Save IE
        */
        or      t2,SR_IE
#elif __mips == 1
        /*
        ** Save current, previous & old int enables.  This is key because
        ** we can dispatch from within the stack frame used by an
        ** interrupt service.  The int enables nest, but not beyond
        ** previous and old because of the dispatch interlock seen
        ** in the interrupt processing code.
        */
        or      t2,SR_IEC + SR_IEP + SR_IEO
#endif
        and     t0,t2           /* keep only the per-task bits */

        mfc0    t1,C0_SR        /* grab the current SR */
        not     t2
        and     t1,t2           /* mask off the old task's per-task bits */
        or      t1,t0           /* or in the new task's bits */
        mtc0    t1,C0_SR        /* and load the new SR */
        NOP

/* _CPU_Context_1: */
        j       ra
        NOP
ENDFRAME(_CPU_Context_switch)


/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        .set noreorder
        move    a1,a0
        j       _CPU_Context_switch_restore
        NOP

ENDFRAME(_CPU_Context_restore)


ASM_EXTERN(_ISR_Nest_level,4)
ASM_EXTERN(_Thread_Dispatch_disable_level,4)
ASM_EXTERN(_Context_Switch_necessary,1)
ASM_EXTERN(_ISR_Signals_to_thread_executing,1)
ASM_EXTERN(_Thread_Executing,4)

.extern _Thread_Dispatch
.extern _ISR_Vector_table


/*  void _DBG_Handler()
 *
 *  This routine services the (at least) MIPS1 debug vector,
 *  only used by the hardware debugging features.  This code,
 *  while optional, is best located here because it's intrinsically
 *  associated with exceptions in general & thus tied pretty
 *  closely to _ISR_Handler.
 *
 */


FRAME(_DBG_Handler,sp,0,ra)
        .set noreorder
        la      k0,_ISR_Handler
        j       k0
        NOP
        .set reorder
ENDFRAME(_DBG_Handler)



/*  void __ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  void _ISR_Handler()
 *
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler while requirements for setting
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branches to _ISR_Handler.
 *
 */

FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU    sp,sp,-EXCP_STACK_SIZE

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
        mflo  t0
        STREG t8, R_T8*R_SZ(sp)
        STREG t0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  t0
        STREG gp, R_GP*R_SZ(sp)
        STREG t0, R_MDHI*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)

        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        mfc0     t0,C0_SR
        MFCO     t1,C0_EPC
        STREG    t0,R_SR*R_SZ(sp)
        STREG    t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw t2, _Thread_Executing
        NOP
        sw t2, 0x8001FFF0
#endif

        /* determine if an interrupt generated this exception */

        mfc0     t0,C0_CAUSE
        NOP

        and      t1,t0,CAUSE_EXCMASK
        beq      t1, 0, _ISR_Handler_1

_ISR_Handler_Exception:

        /*  If we return from the exception, it is assumed nothing
         *  bad is going on and we can continue to run normally.
         *  But we want to save the entire CPU context so exception
         *  handlers can look at it and change it.
         *
         *  NOTE: This is the path the debugger stub will take.
         */

        /* already got t0 = cause in the interrupt test above */
        STREG    t0,R_CAUSE*R_SZ(sp)

        STREG    sp, R_SP*R_SZ(sp)

        STREG    s0,R_S0*R_SZ(sp)     /* save s0 - s7 */
        STREG    s1,R_S1*R_SZ(sp)
        STREG    s2,R_S2*R_SZ(sp)
        STREG    s3,R_S3*R_SZ(sp)
        STREG    s4,R_S4*R_SZ(sp)
        STREG    s5,R_S5*R_SZ(sp)
        STREG    s6,R_S6*R_SZ(sp)
        STREG    s7,R_S7*R_SZ(sp)

        /* CP0 special registers */

#if __mips == 1
        mfc0     t0,C0_TAR
#endif
        MFCO     t1,C0_BADVADDR

#if __mips == 1
        STREG    t0,R_TAR*R_SZ(sp)
#else
        NOP
#endif
        STREG    t1,R_BADVADDR*R_SZ(sp)

#if ( CPU_HARDWARE_FP == TRUE )
        mfc0     t0,C0_SR                 /* FPU is enabled, save state */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 1f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_save_fp_from_exception
        NOP
        mfc1     t0,C1_REVISION
        mfc1     t1,C1_STATUS
        STREG    t0,R_FEIR*R_SZ(sp)
        STREG    t1,R_FCSR*R_SZ(sp)

1:
#endif

        move     a0,sp
        jal      mips_vector_exceptions
        NOP


        /*
        ** Note, if the exception vector returns, rely on it to have
        ** adjusted EPC so we will return to some correct address.  If
        ** this is not done, we might get stuck in an infinite loop because
        ** we'll return to the instruction where the exception occurred and
        ** it could throw again.
        **
        ** It is expected the only code using the exception processing is
        ** either the gdb stub or some user code which is either going to
        ** panic or do something useful.  Regardless, it is up to each
        ** exception routine to properly adjust EPC, so the code below
        ** may be helpful for doing just that.
        */

/* *********************************************************************
** this code follows the R3000's exception return logic, but is not
** needed because the gdb stub does it for us.  It might be useful
** for something else at some point...
**
        * compute the address of the instruction we'll return to *

        LDREG   t1, R_CAUSE*R_SZ(sp)
        LDREG   t0, R_EPC*R_SZ(sp)

        * first see if the exception happened in the delay slot *
        li      t3,CAUSE_BD
        AND     t4,t1,t3
        beqz    t4,excnodelay
        NOP

        * it did, now see if the branch occurred or not *
        li      t3,CAUSE_BT
        AND     t4,t1,t3
        beqz    t4,excnobranch
        NOP

        * branch was taken, we resume at the branch target *
        LDREG   t0, R_TAR*R_SZ(sp)
        j       excreturn
        NOP

excnobranch:
        ADDU    t0,R_SZ

excnodelay:
        ADDU    t0,R_SZ

excreturn:
        STREG   t0, R_EPC*R_SZ(sp)
        NOP
********************************************************************* */


 /* if we're returning into mips_break, move to the next instruction */

        LDREG   t0,R_EPC*R_SZ(sp)
        la      t1,mips_break
        xor     t2,t0,t1
        bnez    t2,3f

        addu    t0,R_SZ
        STREG   t0,R_EPC*R_SZ(sp)
        NOP
3:


#if ( CPU_HARDWARE_FP == TRUE )
        mfc0     t0,C0_SR               /* FPU is enabled, restore state */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 2f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_restore_fp_from_exception
        NOP
        LDREG    t0,R_FEIR*R_SZ(sp)
        LDREG    t1,R_FCSR*R_SZ(sp)
        mtc1     t0,C1_REVISION
        mtc1     t1,C1_STATUS
2:
#endif
        LDREG    s0,R_S0*R_SZ(sp)    /* restore s0 - s7 */
        LDREG    s1,R_S1*R_SZ(sp)
        LDREG    s2,R_S2*R_SZ(sp)
        LDREG    s3,R_S3*R_SZ(sp)
        LDREG    s4,R_S4*R_SZ(sp)
        LDREG    s5,R_S5*R_SZ(sp)
        LDREG    s6,R_S6*R_SZ(sp)
        LDREG    s7,R_S7*R_SZ(sp)

        /* do NOT restore the sp as this could mess up the world */
        /* do NOT restore the cause as this could mess up the world */

        /*
        ** Jump all the way out.  If there's a pending interrupt, just
        ** let it be serviced later.  Since we're probably using the
        ** gdb stub, we've already disrupted the ISR service timing
        ** anyhow.  We oughtn't mix exception and interrupt processing
        ** in the same exception call in case the exception stuff
        ** might interfere with the dispatching & timer ticks.
        */
        j        _ISR_Handler_exit
        NOP

_ISR_Handler_1:

        mfc0     t1,C0_SR
        and      t0,CAUSE_IPMASK
        and      t0,t1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* then where did it come from??? */

        beq      t0,zero,_ISR_Handler_exit
        NOP


  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */


  /*
   *  _ISR_Nest_level++;
   */
        lw      t0,_ISR_Nest_level
        NOP
        add     t0,t0,1
        sw      t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        lw      t1,_Thread_Dispatch_disable_level
        NOP
        add     t1,t1,1
        sw      t1,_Thread_Dispatch_disable_level

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

#ifdef INSTRUMENT_ISR_VECTORING
        NOP
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move     a0,sp
        jal      mips_vector_isr_handlers
        NOP

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        NOP
#endif

  /*
   *  --_ISR_Nest_level;
   */
        lw      t2,_ISR_Nest_level
        NOP
        add     t2,t2,-1
        sw      t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        lw      t1,_Thread_Dispatch_disable_level
        NOP
        add     t1,t1,-1
        sw      t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        NOP


  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        lbu     t0,_Context_Switch_necessary
        lbu     t1,_ISR_Signals_to_thread_executing
        NOP
        or      t0,t0,t1
        beq     t0,zero,_ISR_Handler_exit
        NOP


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering Thread_Dispatch which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the Thread_Dispatch_disable_level interlock
** that prevents recursive entry into Thread_Dispatch.
*/

        mfc0    t0, C0_SR
#if __mips == 1

        li      t1,SR_IEC
        or      t0, t1

#elif (__mips == 3) || (__mips == 32)

        /*
        ** clear EXL and set IE so we can get interrupts.
        */
        li      t1, SR_EXL
        not     t1
        and     t0,t1
        or      t0, SR_IE

#endif
        mtc0    t0, C0_SR
        NOP

        /* save off our stack frame so the context switcher can get to it */
        la      t0,__exceptionStackFrame
        STREG   sp,(t0)

        jal     _Thread_Dispatch
        NOP

        /*
        ** And make sure it's clear in case we didn't dispatch.  If we did,
        ** it's already cleared.
        */
        la      t0,__exceptionStackFrame
        STREG   zero,(t0)
        NOP

/*
** turn interrupts back off while we restore context so
** a badly timed interrupt won't mess things up
*/
        mfc0    t0, C0_SR

#if __mips == 1

        /* ints off, current & prev kernel mode on (kernel mode enabled is bit clear..argh!) */
        li      t1,SR_IEC | SR_KUP | SR_KUC
        not     t1
        and     t0, t1
        mtc0    t0, C0_SR
        NOP

#elif (__mips == 3) || (__mips == 32)

        /* make sure EXL and IE are set so ints are disabled & we can update EPC for the return */
        li   t1,SR_IE           /* Clear IE first (recommended) */
        not  t1
        and  t0,t1
        mtc0 t0,C0_SR
        NOP

        /* apply task's SR with EXL set so the eret will return properly */
        or      t0, SR_EXL | SR_IE
        mtc0    t0, C0_SR
        NOP

        /* store new EPC value, which is safe now that EXL is set */
        LDREG   t0, R_EPC*R_SZ(sp)
        NOP
        MTCO    t0, C0_EPC
        NOP

#endif




#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF8
#endif


  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register. _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changed, so all we need to do is return.
*/
        /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0, 0x8001FFFC
#endif

        LDREG t8, R_MDLO*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        mtlo  t8
        LDREG t8, R_MDHI*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        mthi  t8
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)

#if __mips == 1
        LDREG     k1, R_EPC*R_SZ(sp)
#endif

        .set noat
        LDREG     AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE

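/*
** Return from exception differs by ISA: the MIPS32/R4000 "eret" jumps to
** C0_EPC and clears EXL in one step, while MIPS1 jumps through k1 (loaded
** from the frame's EPC slot above) with "rfe" in the delay slot to pop
** the interrupt-enable/kernel-mode bits in the SR.
*/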
#if (__mips == 3) || (__mips == 32)
        eret
#elif __mips == 1
        j         k1
        rfe
#endif
        NOP

        .set    reorder
ENDFRAME(_ISR_Handler)



FRAME(mips_break,sp,0,ra)
        .set noreorder
        break   0x0     /* this statement must be first in this function, assumed so by mips-stub.c */
        NOP
        j       ra
        NOP
        .set    reorder
ENDFRAME(mips_break)
