source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ 6d42b4c6

4.11
Last change on this file since 6d42b4c6 was 6d42b4c6, checked in by Joel Sherrill <joel.sherrill@…>, on Jun 29, 2010 at 12:34:00 AM

2010-06-28 Joel Sherrill <joel.sherrill@…>

PR 1573/cpukit

  • cpu_asm.S, rtems/score/cpu.h: Add a per cpu data structure which contains the information required by RTEMS for each CPU core. This encapsulates information such as thread executing, heir, idle and dispatch needed.
  • Property mode set to 100644
File size: 30.2 KB
Line 
1/*
2 *  This file contains the basic algorithms for all assembly code used
3 *  in an specific CPU port of RTEMS.  These algorithms must be implemented
4 *  in assembly language
5 *
6 *  History:
7 *    Baseline: no_cpu
8 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
9 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
10 *          To anyone who acknowledges that the modifications to this file to
11 *          port it to the MIPS64ORION are provided "AS IS" without any
12 *          express or implied warranty:
13 *             permission to use, copy, modify, and distribute this file
14 *             for any purpose is hereby granted without fee, provided that
15 *             the above copyright notice and this notice appears in all
16 *             copies, and that the name of Transition Networks not be used in
17 *             advertising or publicity pertaining to distribution of the
18 *             software without specific, written prior permission. Transition
19 *             Networks makes no representations about the suitability
20 *             of this software for any purpose.
21 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
22 *          the baseline of the more general MIPS port.
23 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
24 *          rewriting as much as possible in C and added the JMR3904 BSP
25 *          so testing could be performed on a simulator.
26 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
27 *          performance, tweaking this code and the isr vectoring routines
28 *          to reduce overhead & latencies.  Added optional
29 *          instrumentation as well.
30 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
31 *          cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
32 *          and deferred FP contexts.
33 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
34 *          by increasing the amount of context saved/restored.
35 *    2004: 24March, Art Ferrer, NASA/GSFC, added save of FP status/control
36 *          register to fix intermittent FP error encountered on ST5 mission
37 *          implementation on Mongoose V processor.
38 *    2004: April 7, Greg Menke <gregory.menke@gsfc.nasa.gov> Added __mips==32
39 *          support for R4000 processors running 32 bit code.  Fixed #define
40 *          problems that caused fpu code to always be included even when no
41 *          fpu is present.
42 *
43 *  COPYRIGHT (c) 1989-2002.
44 *  On-Line Applications Research Corporation (OAR).
45 *
46 *  The license and distribution terms for this file may be
47 *  found in the file LICENSE in this distribution or at
48 *  http://www.rtems.com/license/LICENSE.
49 *
50 *  $Id$
51 */
52
53#ifdef HAVE_CONFIG_H
54#include "config.h"
55#endif
56
57#include <rtems/asm.h>
58#include <rtems/mips/iregdef.h>
59#include <rtems/mips/idtcpu.h>
60#include <rtems/score/percpu.h>
61
62#define ASSEMBLY_ONLY
63#include <rtems/score/cpu.h>
64
65#if TRUE
66#else
67#error TRUE is not true
68#endif
69#if FALSE
70#error FALSE is not false
71#else
72#endif
73
74/*
75#if ( CPU_HARDWARE_FP == TRUE )
76#warning CPU_HARDWARE_FP == TRUE
77#else
78#warning CPU_HARDWARE_FP != TRUE
79#endif
80*/
81
82
83/* enable debugging shadow writes to misc ram, this is a vestigal
84* Mongoose-ism debug tool- but may be handy in the future so we
85* left it in...
86*/
87
88/* #define INSTRUMENT_ISR_VECTORING */
89/* #define INSTRUMENT_EXECUTING_THREAD */
90
91
92
93/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
94 *  and MIPS ISA Level 1 (R3xxx).
95 */
96
97#if __mips == 3
98/* 64 bit register operations */
99#define NOP     nop
100#define ADD     dadd
101#define STREG   sd
102#define LDREG   ld
103#define MFCO    dmfc0           /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
104#define MTCO    dmtc0           /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
105#define ADDU    addu
106#define ADDIU   addiu
107#if (__mips_fpr==32)
108#define STREGC1 swc1
109#define LDREGC1 lwc1
110#elif (__mips_fpr==64)          /* Use these instructions if there are 64 bit floating point registers. This requires FR bit to be set in C0_SR */
111#define STREGC1 sdc1
112#define LDREGC1 ldc1
113#endif
114#define R_SZ    8
115#define F_SZ    8
116#define SZ_INT  8
117#define SZ_INT_POW2 3
118
119/* XXX if we don't always want 64 bit register ops, then another ifdef */
120
121#elif (__mips == 1 ) || (__mips == 32)
122/* 32 bit register operations*/
123#define NOP     nop
124#define ADD     add
125#define STREG   sw
126#define LDREG   lw
127#define MFCO    mfc0
128#define MTCO    mtc0
129#define ADDU    add
130#define ADDIU   addi
131#define STREGC1 swc1
132#define LDREGC1 lwc1
133#define R_SZ    4
134#define F_SZ    4
135#define SZ_INT  4
136#define SZ_INT_POW2 2
137#else
138#error "mips assembly: what size registers do I deal with?"
139#endif
140
141
142#define ISR_VEC_SIZE    4
143#define EXCP_STACK_SIZE (NREGS*R_SZ)
144
145
146#ifdef __GNUC__
147#define ASM_EXTERN(x,size) .extern x,size
148#else
149#define ASM_EXTERN(x,size)
150#endif
151
152/* NOTE: these constants must match the Context_Control structure in cpu.h */
153#define S0_OFFSET 0
154#define S1_OFFSET 1
155#define S2_OFFSET 2
156#define S3_OFFSET 3
157#define S4_OFFSET 4
158#define S5_OFFSET 5
159#define S6_OFFSET 6
160#define S7_OFFSET 7
161#define SP_OFFSET 8
162#define FP_OFFSET 9
163#define RA_OFFSET 10
164#define C0_SR_OFFSET 11
165#define C0_EPC_OFFSET 12
166
167/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
168#define FP0_OFFSET  0
169#define FP1_OFFSET  1
170#define FP2_OFFSET  2
171#define FP3_OFFSET  3
172#define FP4_OFFSET  4
173#define FP5_OFFSET  5
174#define FP6_OFFSET  6
175#define FP7_OFFSET  7
176#define FP8_OFFSET  8
177#define FP9_OFFSET  9
178#define FP10_OFFSET 10
179#define FP11_OFFSET 11
180#define FP12_OFFSET 12
181#define FP13_OFFSET 13
182#define FP14_OFFSET 14
183#define FP15_OFFSET 15
184#define FP16_OFFSET 16
185#define FP17_OFFSET 17
186#define FP18_OFFSET 18
187#define FP19_OFFSET 19
188#define FP20_OFFSET 20
189#define FP21_OFFSET 21
190#define FP22_OFFSET 22
191#define FP23_OFFSET 23
192#define FP24_OFFSET 24
193#define FP25_OFFSET 25
194#define FP26_OFFSET 26
195#define FP27_OFFSET 27
196#define FP28_OFFSET 28
197#define FP29_OFFSET 29
198#define FP30_OFFSET 30
199#define FP31_OFFSET 31
200#define FPCS_OFFSET 32
201
202
203ASM_EXTERN(__exceptionStackFrame, SZ_INT)
204
205/*
206 *  _CPU_Context_save_fp_context
207 *
208 *  This routine is responsible for saving the FP context
209 *  at *fp_context_ptr.  If the point to load the FP context
210 *  from is changed then the pointer is modified by this routine.
211 *
212 *  Sometimes a macro implementation of this is in cpu.h which dereferences
213 *  the ** and a similarly named routine in this file is passed something
214 *  like a (Context_Control_fp *).  The general rule on making this decision
215 *  is to avoid writing assembly language.
216 */
217
218/* void _CPU_Context_save_fp(
219 *   void **fp_context_ptr
220 * );
221 */
222
223#if ( CPU_HARDWARE_FP == TRUE )
224FRAME(_CPU_Context_save_fp,sp,0,ra)
225        .set noreorder
226        .set noat
227
228        /*
229        ** Make sure the FPU is on before we save state.  This code
230        ** is here because the FPU context switch might occur when an
231        ** integer task is switching out with a FP task switching in.
232        */
233        mfc0    t0,C0_SR
234        li      t2,SR_CU1
235        move    t1,t0
236        or      t0,t2           /* turn on the fpu */
237#if (__mips == 3) || (__mips == 32)
238        li      t2,SR_IE
239#elif __mips == 1
240        li      t2,SR_IEC
241#endif
242        not     t2
243        and     t0,t2           /* turn off interrupts */
244        mtc0    t0,C0_SR
245
246        lw      a1,(a0)         /* get address of context storage area */
247        move    t0,ra
248        jal     _CPU_Context_save_fp_from_exception
249        NOP
250
251        /*
252        ** Reassert the task's state because we've not saved it yet.
253        */
254        mtc0    t1,C0_SR
255        j       t0
256        NOP
257
258        .globl _CPU_Context_save_fp_from_exception
259_CPU_Context_save_fp_from_exception:
260        STREGC1 $f0,FP0_OFFSET*F_SZ(a1)
261        STREGC1 $f1,FP1_OFFSET*F_SZ(a1)
262        STREGC1 $f2,FP2_OFFSET*F_SZ(a1)
263        STREGC1 $f3,FP3_OFFSET*F_SZ(a1)
264        STREGC1 $f4,FP4_OFFSET*F_SZ(a1)
265        STREGC1 $f5,FP5_OFFSET*F_SZ(a1)
266        STREGC1 $f6,FP6_OFFSET*F_SZ(a1)
267        STREGC1 $f7,FP7_OFFSET*F_SZ(a1)
268        STREGC1 $f8,FP8_OFFSET*F_SZ(a1)
269        STREGC1 $f9,FP9_OFFSET*F_SZ(a1)
270        STREGC1 $f10,FP10_OFFSET*F_SZ(a1)
271        STREGC1 $f11,FP11_OFFSET*F_SZ(a1)
272        STREGC1 $f12,FP12_OFFSET*F_SZ(a1)
273        STREGC1 $f13,FP13_OFFSET*F_SZ(a1)
274        STREGC1 $f14,FP14_OFFSET*F_SZ(a1)
275        STREGC1 $f15,FP15_OFFSET*F_SZ(a1)
276        STREGC1 $f16,FP16_OFFSET*F_SZ(a1)
277        STREGC1 $f17,FP17_OFFSET*F_SZ(a1)
278        STREGC1 $f18,FP18_OFFSET*F_SZ(a1)
279        STREGC1 $f19,FP19_OFFSET*F_SZ(a1)
280        STREGC1 $f20,FP20_OFFSET*F_SZ(a1)
281        STREGC1 $f21,FP21_OFFSET*F_SZ(a1)
282        STREGC1 $f22,FP22_OFFSET*F_SZ(a1)
283        STREGC1 $f23,FP23_OFFSET*F_SZ(a1)
284        STREGC1 $f24,FP24_OFFSET*F_SZ(a1)
285        STREGC1 $f25,FP25_OFFSET*F_SZ(a1)
286        STREGC1 $f26,FP26_OFFSET*F_SZ(a1)
287        STREGC1 $f27,FP27_OFFSET*F_SZ(a1)
288        STREGC1 $f28,FP28_OFFSET*F_SZ(a1)
289        STREGC1 $f29,FP29_OFFSET*F_SZ(a1)
290        STREGC1 $f30,FP30_OFFSET*F_SZ(a1)
291        STREGC1 $f31,FP31_OFFSET*F_SZ(a1)
292        cfc1 a0,$31                    /* Read FP status/conrol reg */
293        cfc1 a0,$31                    /* Two reads clear pipeline */
294        NOP
295        NOP
296        sw a0, FPCS_OFFSET*F_SZ(a1)    /* Store value to FPCS location */
297        NOP
298        j ra
299        NOP
300        .set at
301ENDFRAME(_CPU_Context_save_fp)
302#endif
303
304/*
305 *  _CPU_Context_restore_fp_context
306 *
307 *  This routine is responsible for restoring the FP context
308 *  at *fp_context_ptr.  If the point to load the FP context
309 *  from is changed then the pointer is modified by this routine.
310 *
311 *  Sometimes a macro implementation of this is in cpu.h which dereferences
312 *  the ** and a similarly named routine in this file is passed something
313 *  like a (Context_Control_fp *).  The general rule on making this decision
314 *  is to avoid writing assembly language.
315 */
316
317/* void _CPU_Context_restore_fp(
318 *   void **fp_context_ptr
319 * )
320 */
321
322#if ( CPU_HARDWARE_FP == TRUE )
323FRAME(_CPU_Context_restore_fp,sp,0,ra)
324        .set noat
325        .set noreorder
326
327        /*
328        ** Make sure the FPU is on before we retrieve state.  This code
329        ** is here because the FPU context switch might occur when an
330        ** integer task is switching out with a FP task switching in.
331        */
332        mfc0    t0,C0_SR
333        li      t2,SR_CU1
334        move    t1,t0
335        or      t0,t2           /* turn on the fpu */
336#if (__mips == 3) || (__mips == 32)
337        li      t2,SR_IE
338#elif __mips == 1
339        li      t2,SR_IEC
340#endif
341        not     t2
342        and     t0,t2           /* turn off interrupts */
343        mtc0    t0,C0_SR
344
345        lw      a1,(a0)         /* get address of context storage area */
346        move    t0,ra
347        jal     _CPU_Context_restore_fp_from_exception
348        NOP
349
350        /*
351        ** Reassert the old task's state because we've not restored the
352        ** new one yet.
353        */
354        mtc0    t1,C0_SR
355        j       t0
356        NOP
357
358        .globl _CPU_Context_restore_fp_from_exception
359_CPU_Context_restore_fp_from_exception:
360        LDREGC1 $f0,FP0_OFFSET*F_SZ(a1)
361        LDREGC1 $f1,FP1_OFFSET*F_SZ(a1)
362        LDREGC1 $f2,FP2_OFFSET*F_SZ(a1)
363        LDREGC1 $f3,FP3_OFFSET*F_SZ(a1)
364        LDREGC1 $f4,FP4_OFFSET*F_SZ(a1)
365        LDREGC1 $f5,FP5_OFFSET*F_SZ(a1)
366        LDREGC1 $f6,FP6_OFFSET*F_SZ(a1)
367        LDREGC1 $f7,FP7_OFFSET*F_SZ(a1)
368        LDREGC1 $f8,FP8_OFFSET*F_SZ(a1)
369        LDREGC1 $f9,FP9_OFFSET*F_SZ(a1)
370        LDREGC1 $f10,FP10_OFFSET*F_SZ(a1)
371        LDREGC1 $f11,FP11_OFFSET*F_SZ(a1)
372        LDREGC1 $f12,FP12_OFFSET*F_SZ(a1)
373        LDREGC1 $f13,FP13_OFFSET*F_SZ(a1)
374        LDREGC1 $f14,FP14_OFFSET*F_SZ(a1)
375        LDREGC1 $f15,FP15_OFFSET*F_SZ(a1)
376        LDREGC1 $f16,FP16_OFFSET*F_SZ(a1)
377        LDREGC1 $f17,FP17_OFFSET*F_SZ(a1)
378        LDREGC1 $f18,FP18_OFFSET*F_SZ(a1)
379        LDREGC1 $f19,FP19_OFFSET*F_SZ(a1)
380        LDREGC1 $f20,FP20_OFFSET*F_SZ(a1)
381        LDREGC1 $f21,FP21_OFFSET*F_SZ(a1)
382        LDREGC1 $f22,FP22_OFFSET*F_SZ(a1)
383        LDREGC1 $f23,FP23_OFFSET*F_SZ(a1)
384        LDREGC1 $f24,FP24_OFFSET*F_SZ(a1)
385        LDREGC1 $f25,FP25_OFFSET*F_SZ(a1)
386        LDREGC1 $f26,FP26_OFFSET*F_SZ(a1)
387        LDREGC1 $f27,FP27_OFFSET*F_SZ(a1)
388        LDREGC1 $f28,FP28_OFFSET*F_SZ(a1)
389        LDREGC1 $f29,FP29_OFFSET*F_SZ(a1)
390        LDREGC1 $f30,FP30_OFFSET*F_SZ(a1)
391        LDREGC1 $f31,FP31_OFFSET*F_SZ(a1)
392        cfc1 a0,$31                  /* Read from FP status/control reg */
393        cfc1 a0,$31                  /* Two reads clear pipeline */
394        NOP                          /* NOPs ensure execution */
395        NOP
396        lw a0,FPCS_OFFSET*F_SZ(a1)   /* Load saved FPCS value */
397        NOP
398        ctc1 a0,$31                  /* Restore FPCS register */
399        NOP
400        j ra
401        NOP
402        .set at
403ENDFRAME(_CPU_Context_restore_fp)
404#endif
405
406/*  _CPU_Context_switch
407 *
408 *  This routine performs a normal non-FP context switch.
409 */
410
411/* void _CPU_Context_switch(
412 *   Context_Control  *run,
413 *   Context_Control  *heir
414 * )
415 */
416
417FRAME(_CPU_Context_switch,sp,0,ra)
418        .set noreorder
419
420        mfc0    t0,C0_SR
421#if (__mips == 3) || (__mips == 32)
422        li      t1,SR_IE
423#elif __mips == 1
424        li      t1,SR_IEC
425#endif
426        STREG   t0,C0_SR_OFFSET*R_SZ(a0)        /* save the task's SR */
427        not     t1
428        and     t0,t1                           /* mask off interrupts while we context switch */
429        mtc0    t0,C0_SR
430        NOP
431
432        STREG ra,RA_OFFSET*R_SZ(a0)             /* save current context */
433        STREG sp,SP_OFFSET*R_SZ(a0)
434        STREG fp,FP_OFFSET*R_SZ(a0)
435        STREG s0,S0_OFFSET*R_SZ(a0)
436        STREG s1,S1_OFFSET*R_SZ(a0)
437        STREG s2,S2_OFFSET*R_SZ(a0)
438        STREG s3,S3_OFFSET*R_SZ(a0)
439        STREG s4,S4_OFFSET*R_SZ(a0)
440        STREG s5,S5_OFFSET*R_SZ(a0)
441        STREG s6,S6_OFFSET*R_SZ(a0)
442        STREG s7,S7_OFFSET*R_SZ(a0)
443
444
445        /*
446        ** this code grabs the userspace EPC if we're dispatching from
447        ** an interrupt frame or supplies the address of the dispatch
448        ** routines if not.  This is entirely for the gdbstub's benefit so
449        ** it can know where each task is running.
450        **
451        ** Its value is only set when calling threadDispatch from
452        ** the interrupt handler and is cleared immediately when this
453        ** routine gets it.
454        */
455
456        la      t0,__exceptionStackFrame        /* see if we're coming in from an exception */
457        LDREG   t1, (t0)
458        NOP
459        beqz    t1,1f
460
461        STREG   zero, (t0)                      /* and clear it */
462        NOP
463        LDREG   t0,R_EPC*R_SZ(t1)               /* get the userspace EPC from the frame */
464        b       2f
465        NOP
466
4671:      la      t0,_Thread_Dispatch             /* if ==0, we're switched out */
468
4692:      STREG   t0,C0_EPC_OFFSET*R_SZ(a0)
470
471
472_CPU_Context_switch_restore:
473        LDREG ra,RA_OFFSET*R_SZ(a1)             /* restore context */
474        LDREG sp,SP_OFFSET*R_SZ(a1)
475        LDREG fp,FP_OFFSET*R_SZ(a1)
476        LDREG s0,S0_OFFSET*R_SZ(a1)
477        LDREG s1,S1_OFFSET*R_SZ(a1)
478        LDREG s2,S2_OFFSET*R_SZ(a1)
479        LDREG s3,S3_OFFSET*R_SZ(a1)
480        LDREG s4,S4_OFFSET*R_SZ(a1)
481        LDREG s5,S5_OFFSET*R_SZ(a1)
482        LDREG s6,S6_OFFSET*R_SZ(a1)
483        LDREG s7,S7_OFFSET*R_SZ(a1)
484
485        LDREG t0, C0_SR_OFFSET*R_SZ(a1)
486
487/*      NOP */
488/*#if (__mips == 3) || (__mips == 32) */
489/*        andi  t0,SR_EXL */
490/*        bnez  t0,_CPU_Context_1 */   /* set exception level from restore context */
491/*        li    t0,~SR_EXL */
492/*        MFC0  t1,C0_SR */
493/*        NOP */
494/*        and   t1,t0 */
495/*        MTC0  t1,C0_SR */
496/* */
497/*#elif __mips == 1 */
498/* */
499/*        andi  t0,(SR_INTERRUPT_ENABLE_BITS) */ /* we know 0 disabled */
500/*        beq   t0,$0,_CPU_Context_1  */         /* set level from restore context */
501/*        MFC0  t0,C0_SR */
502/*        NOP */
503/*        or    t0,(SR_INTERRUPT_ENABLE_BITS) */ /* new_sr = old sr with enabled  */
504/*        MTC0  t0,C0_SR */                     /* set with enabled */
505/*        NOP */
506
507
508/*
509** Incorporate the incoming task's FP coprocessor state and interrupt mask/enable
510** into the status register.  We jump thru the requisite hoops to ensure we
511** maintain all other SR bits as global values.
512**
513** Get the task's FPU enable, int mask & int enable bits.  Although we keep the
514** software int enables on a per-task basis, the rtems_task_create
515** Interrupt Level & int level manipulation functions cannot enable/disable them,
516** so they are automatically enabled for all tasks.  To turn them off, a task
517** must itself manipulate the SR register.
518**
519** Although something of a hack on this processor, we treat the SR register
520** int enables as the RTEMS interrupt level.  We use the int level
521** value as a bitmask, not as any sort of greater than/less than metric.
522** Manipulation of a task's interrupt level corresponds directly to manipulation
523** of that task's SR bits, as seen in cpu.c
524**
525** Note, interrupts are disabled before context is saved, though the task's
526** interrupt enable state is recorded.  The task swapping in will apply its
527** specific SR bits, including interrupt enable.  If further task-specific
528** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
529** cpu.h task initialization code that will be affected.
530*/
531
532        li      t2,SR_CU1
533        or      t2,SR_IMASK
534
535        /* int enable bits */
536#if (__mips == 3) || (__mips == 32)
537        /*
538        ** Save IE
539        */
540        or      t2,SR_IE
541#elif __mips == 1
542        /*
543        ** Save current, previous & old int enables.  This is key because
544        ** we can dispatch from within the stack frame used by an
545        ** interrupt service.  The int enables nest, but not beyond
546        ** previous and old because of the dispatch interlock seen
547        ** in the interrupt processing code.
548        */
549        or      t2,SR_IEC + SR_IEP + SR_IEO
550#endif
551        and     t0,t2           /* keep only the per-task bits */
552
553        mfc0    t1,C0_SR        /* grab the current SR */
554        not     t2
555        and     t1,t2           /* mask off the old task's per-task bits */
556        or      t1,t0           /* or in the new task's bits */
557        mtc0    t1,C0_SR        /* and load the new SR */
558        NOP
559
560/* _CPU_Context_1: */
561        j       ra
562        NOP
563ENDFRAME(_CPU_Context_switch)
564
565
566/*
567 *  _CPU_Context_restore
568 *
569 *  This routine is generally used only to restart self in an
570 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
571 *
572 *  NOTE: May be unnecessary to reload some registers.
573 *
574 *  void _CPU_Context_restore(
575 *    Context_Control *new_context
576 *  );
577 */
578
579FRAME(_CPU_Context_restore,sp,0,ra)
580        .set noreorder
581        move    a1,a0
582        j       _CPU_Context_switch_restore
583        NOP
584
585ENDFRAME(_CPU_Context_restore)
586
587ASM_EXTERN(_Thread_Dispatch_disable_level,4)
588ASM_EXTERN(_Thread_Executing,4)
589
590.extern _Thread_Dispatch
591.extern _ISR_Vector_table
592
593/*  void _DBG_Handler()
594 *
595 *  This routine services the (at least) MIPS1 debug vector,
596 *  only used the the hardware debugging features.  This code,
597 *  while optional, is best located here because its intrinsically
598 *  associated with exceptions in general & thus tied pretty
599 *  closely to _ISR_Handler.
600 */
601FRAME(_DBG_Handler,sp,0,ra)
602        .set noreorder
603        la      k0,_ISR_Handler
604        j       k0
605        NOP
606        .set reorder
607ENDFRAME(_DBG_Handler)
608
609/*  void __ISR_Handler()
610 *
611 *  This routine provides the RTEMS interrupt management.
612 *
613 *  void _ISR_Handler()
614 *
615 *
616 *  This discussion ignores a lot of the ugly details in a real
617 *  implementation such as saving enough registers/state to be
618 *  able to do something real.  Keep in mind that the goal is
619 *  to invoke a user's ISR handler which is written in C and
620 *  uses a certain set of registers.
621 *
622 *  Also note that the exact order is to a large extent flexible.
623 *  Hardware will dictate a sequence for a certain subset of
624 *  _ISR_Handler while requirements for setting
625 *
626 *  At entry to "common" _ISR_Handler, the vector number must be
627 *  available.  On some CPUs the hardware puts either the vector
628 *  number or the offset into the vector table for this ISR in a
629 *  known place.  If the hardware does not give us this information,
630 *  then the assembly portion of RTEMS for this port will contain
631 *  a set of distinct interrupt entry points which somehow place
632 *  the vector number in a known place (which is safe if another
633 *  interrupt nests this one) and branches to _ISR_Handler.
634 *
635 */
636
637FRAME(_ISR_Handler,sp,0,ra)
638        .set noreorder
639
640        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */
641
642        /* wastes a lot of stack space for context?? */
643        ADDIU    sp,sp,-EXCP_STACK_SIZE
644
645        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
646        STREG v0, R_V0*R_SZ(sp)
647        STREG v1, R_V1*R_SZ(sp)
648        STREG a0, R_A0*R_SZ(sp)
649        STREG a1, R_A1*R_SZ(sp)
650        STREG a2, R_A2*R_SZ(sp)
651        STREG a3, R_A3*R_SZ(sp)
652        STREG t0, R_T0*R_SZ(sp)
653        STREG t1, R_T1*R_SZ(sp)
654        STREG t2, R_T2*R_SZ(sp)
655        STREG t3, R_T3*R_SZ(sp)
656        STREG t4, R_T4*R_SZ(sp)
657        STREG t5, R_T5*R_SZ(sp)
658        STREG t6, R_T6*R_SZ(sp)
659        STREG t7, R_T7*R_SZ(sp)
660        mflo  t0
661        STREG t8, R_T8*R_SZ(sp)
662        STREG t0, R_MDLO*R_SZ(sp)
663        STREG t9, R_T9*R_SZ(sp)
664        mfhi  t0
665        STREG gp, R_GP*R_SZ(sp)
666        STREG t0, R_MDHI*R_SZ(sp)
667        STREG fp, R_FP*R_SZ(sp)
668
669        .set noat
670        STREG AT, R_AT*R_SZ(sp)
671        .set at
672
673        mfc0     t0,C0_SR
674        MFCO     t1,C0_EPC
675        STREG    t0,R_SR*R_SZ(sp)
676        STREG    t1,R_EPC*R_SZ(sp)
677
678
679#ifdef INSTRUMENT_EXECUTING_THREAD
680        lw t2, THREAD_EXECUTING
681        NOP
682        sw t2, 0x8001FFF0
683#endif
684
685        /* determine if an interrupt generated this exception */
686
687        mfc0     t0,C0_CAUSE
688        NOP
689
690        and      t1,t0,CAUSE_EXCMASK
691        beq      t1, 0, _ISR_Handler_1
692
693_ISR_Handler_Exception:
694
695        /*  If we return from the exception, it is assumed nothing
696         *  bad is going on and we can continue to run normally.
697         *  But we want to save the entire CPU context so exception
698         *  handlers can look at it and change it.
699         *
700         *  NOTE: This is the path the debugger stub will take.
701         */
702
703        /* already got t0 = cause in the interrupt test above */
704        STREG    t0,R_CAUSE*R_SZ(sp)
705
706        STREG    sp, R_SP*R_SZ(sp)
707
708        STREG    s0,R_S0*R_SZ(sp)     /* save s0 - s7 */
709        STREG    s1,R_S1*R_SZ(sp)
710        STREG    s2,R_S2*R_SZ(sp)
711        STREG    s3,R_S3*R_SZ(sp)
712        STREG    s4,R_S4*R_SZ(sp)
713        STREG    s5,R_S5*R_SZ(sp)
714        STREG    s6,R_S6*R_SZ(sp)
715        STREG    s7,R_S7*R_SZ(sp)
716
717        /* CP0 special registers */
718
719#if __mips == 1
720        mfc0     t0,C0_TAR
721#endif
722        MFCO     t1,C0_BADVADDR
723
724#if __mips == 1
725        STREG    t0,R_TAR*R_SZ(sp)
726#else
727        NOP
728#endif
729        STREG    t1,R_BADVADDR*R_SZ(sp)
730
731#if ( CPU_HARDWARE_FP == TRUE )
732        mfc0     t0,C0_SR                 /* FPU is enabled, save state */
733        NOP
734        srl      t0,t0,16
735        andi     t0,t0,(SR_CU1 >> 16)
736        beqz     t0, 1f
737        NOP
738
739        la       a1,R_F0*R_SZ(sp)
740        jal      _CPU_Context_save_fp_from_exception
741        NOP
742        mfc1     t0,C1_REVISION
743        mfc1     t1,C1_STATUS
744        STREG    t0,R_FEIR*R_SZ(sp)
745        STREG    t1,R_FCSR*R_SZ(sp)
746
7471:
748#endif
749
750        move     a0,sp
751        jal      mips_vector_exceptions
752        NOP
753
754
755        /*
756        ** Note, if the exception vector returns, rely on it to have
757        ** adjusted EPC so we will return to some correct address.  If
758        ** this is not done, we might get stuck in an infinite loop because
759        ** we'll return to the instruction where the exception occured and
760        ** it could throw again.
761        **
762        ** It is expected the only code using the exception processing is
763        ** either the gdb stub or some user code which is either going to
764        ** panic or do something useful.  Regardless, it is up to each
765        ** exception routine to properly adjust EPC, so the code below
766        ** may be helpful for doing just that.
767        */
768
769/* *********************************************************************
770** this code follows the R3000's exception return logic, but is not
771** needed because the gdb stub does it for us.  It might be useful
772** for something else at some point...
773**
774        * compute the address of the instruction we'll return to *
775
776        LDREG   t1, R_CAUSE*R_SZ(sp)
777        LDREG   t0, R_EPC*R_SZ(sp)
778
779        * first see if the exception happened in the delay slot *
780        li      t3,CAUSE_BD
781        AND     t4,t1,t3
782        beqz    t4,excnodelay
783        NOP
784
785        * it did, now see if the branch occured or not *
786        li      t3,CAUSE_BT
787        AND     t4,t1,t3
788        beqz    t4,excnobranch
789        NOP
790
791        * branch was taken, we resume at the branch target *
792        LDREG   t0, R_TAR*R_SZ(sp)
793        j       excreturn
794        NOP
795
796excnobranch:
797        ADDU    t0,R_SZ
798
799excnodelay:
800        ADDU    t0,R_SZ
801
802excreturn:
803        STREG   t0, R_EPC*R_SZ(sp)
804        NOP
805********************************************************************* */
806
807
808 /* if we're returning into mips_break, move to the next instruction */
809
810        LDREG   t0,R_EPC*R_SZ(sp)
811        la      t1,mips_break
812        xor     t2,t0,t1
813        bnez    t2,3f
814
815        addu    t0,R_SZ
816        STREG   t0,R_EPC*R_SZ(sp)
817        NOP
8183:
819
820
821
822
823#if ( CPU_HARDWARE_FP == TRUE )
824        mfc0     t0,C0_SR               /* FPU is enabled, restore state */
825        NOP
826        srl      t0,t0,16
827        andi     t0,t0,(SR_CU1 >> 16)
828        beqz     t0, 2f
829        NOP
830
831        la       a1,R_F0*R_SZ(sp)
832        jal      _CPU_Context_restore_fp_from_exception
833        NOP
834        LDREG    t0,R_FEIR*R_SZ(sp)
835        LDREG    t1,R_FCSR*R_SZ(sp)
836        mtc1     t0,C1_REVISION
837        mtc1     t1,C1_STATUS
8382:
839#endif
840        LDREG    s0,R_S0*R_SZ(sp)    /* restore s0 - s7 */
841        LDREG    s1,R_S1*R_SZ(sp)
842        LDREG    s2,R_S2*R_SZ(sp)
843        LDREG    s3,R_S3*R_SZ(sp)
844        LDREG    s4,R_S4*R_SZ(sp)
845        LDREG    s5,R_S5*R_SZ(sp)
846        LDREG    s6,R_S6*R_SZ(sp)
847        LDREG    s7,R_S7*R_SZ(sp)
848
849        /* do NOT restore the sp as this could mess up the world */
850        /* do NOT restore the cause as this could mess up the world */
851
852        /*
853        ** Jump all the way out.  If theres a pending interrupt, just
854        ** let it be serviced later.  Since we're probably using the
855        ** gdb stub, we've already disrupted the ISR service timing
856        ** anyhow.  We oughtn't mix exception and interrupt processing
857        ** in the same exception call in case the exception stuff
858        ** might interfere with the dispatching & timer ticks.
859        */
860        j        _ISR_Handler_exit
861        NOP
862
863_ISR_Handler_1:
864
865        mfc0     t1,C0_SR
866        and      t0,CAUSE_IPMASK
867        and      t0,t1
868
869        /* external interrupt not enabled, ignore */
870        /* but if it's not an exception or an interrupt, */
871        /* Then where did it come from??? */
872
873        beq      t0,zero,_ISR_Handler_exit
874        NOP
875
876
877  /*
878   *  save some or all context on stack
879   *  may need to save some special interrupt information for exit
880   *
881   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
882   *    if ( _ISR_Nest_level == 0 )
883   *      switch to software interrupt stack
884   *  #endif
885   */
886
887
888  /*
889   *  _ISR_Nest_level++;
890   */
891        lw      t0,ISR_NEST_LEVEL
892        NOP
893        add     t0,t0,1
894        sw      t0,ISR_NEST_LEVEL
895  /*
896   *  _Thread_Dispatch_disable_level++;
897   */
898        lw      t1,_Thread_Dispatch_disable_level
899        NOP
900        add     t1,t1,1
901        sw      t1,_Thread_Dispatch_disable_level
902
903  /*
904   *  Call the CPU model or BSP specific routine to decode the
905   *  interrupt source and actually vector to device ISR handlers.
906   */
907
908#ifdef INSTRUMENT_ISR_VECTORING
909        NOP
910        li      t1, 1
911        sw      t1, 0x8001e000
912#endif
913
914        move     a0,sp
915        jal      mips_vector_isr_handlers
916        NOP
917
918#ifdef INSTRUMENT_ISR_VECTORING
919        li      t1, 0
920        sw      t1, 0x8001e000
921        NOP
922#endif
923
924  /*
925   *  --_ISR_Nest_level;
926   */
927        lw      t2,ISR_NEST_LEVEL
928        NOP
929        add     t2,t2,-1
930        sw      t2,ISR_NEST_LEVEL
931  /*
932   *  --_Thread_Dispatch_disable_level;
933   */
934        lw      t1,_Thread_Dispatch_disable_level
935        NOP
936        add     t1,t1,-1
937        sw      t1,_Thread_Dispatch_disable_level
938  /*
939   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
940   *    goto the label "exit interrupt (simple case)"
941   */
942        or  t0,t2,t1
943        bne t0,zero,_ISR_Handler_exit
944        NOP
945
946
947  /*
948   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
949   *    restore stack
950   *  #endif
951   *
952   *  if !_Context_Switch_necessary
953   *    goto the label "exit interrupt (simple case)"
954   */
955        lbu     t0,DISPATCH_NEEDED
956        NOP
957        or      t0,t0,t0
958        beq     t0,zero,_ISR_Handler_exit
959        NOP
960
961
962
963#ifdef INSTRUMENT_EXECUTING_THREAD
964        lw      t0,THREAD_EXECUTING
965        NOP
966        sw      t0,0x8001FFF4
967#endif
968
969/*
970** Turn on interrupts before entering Thread_Dispatch which
971** will run for a while, thus allowing new interrupts to
972** be serviced.  Observe the Thread_Dispatch_disable_level interlock
973** that prevents recursive entry into Thread_Dispatch.
974*/
975
976        mfc0    t0, C0_SR
977#if __mips == 1
978
979        li      t1,SR_IEC
980        or      t0, t1
981
982#elif (__mips == 3) || (__mips == 32)
983
984        /*
985        ** clear XL and set IE so we can get interrupts.
986        */
987        li      t1, SR_EXL
988        not     t1
989        and     t0,t1
990        or      t0, SR_IE
991
992#endif
993        mtc0    t0, C0_SR
994        NOP
995
996        /* save off our stack frame so the context switcher can get to it */
997        la      t0,__exceptionStackFrame
998        STREG   sp,(t0)
999
1000        jal     _Thread_Dispatch
1001        NOP
1002
1003        /*
1004        ** And make sure its clear in case we didn't dispatch.  if we did, its
1005        ** already cleared
1006        */
1007        la      t0,__exceptionStackFrame
1008        STREG   zero,(t0)
1009        NOP
1010
1011/*
1012** turn interrupts back off while we restore context so
1013** a badly timed interrupt won't mess things up
1014*/
1015        mfc0    t0, C0_SR
1016
1017#if __mips == 1
1018
1019        /* ints off, current & prev kernel mode on (kernel mode enabled is bit clear..argh!) */
1020        li      t1,SR_IEC | SR_KUP | SR_KUC
1021        not     t1
1022        and     t0, t1
1023        mtc0    t0, C0_SR
1024        NOP
1025
1026#elif (__mips == 3) || (__mips == 32)
1027
1028        /* make sure EXL and IE are set so ints are disabled & we can update EPC for the return */
1029        li   t1,SR_IE           /* Clear IE first (recommended) */
1030        not  t1
1031        and  t0,t1
1032        mtc0 t0,C0_SR
1033        NOP
1034 
1035        /* apply task's SR with EXL set so the eret will return properly */
1036        or      t0, SR_EXL | SR_IE
1037        mtc0    t0, C0_SR
1038        NOP
1039
1040        /* store new EPC value, which we can do since EXL=0 */
1041        LDREG   t0, R_EPC*R_SZ(sp)
1042        NOP
1043        MTCO    t0, C0_EPC
1044        NOP
1045 
1046#endif
1047
1048
1049
1050
1051
1052
1053#ifdef INSTRUMENT_EXECUTING_THREAD
1054        lw      t0,THREAD_EXECUTING
1055        NOP
1056        sw      t0,0x8001FFF8
1057#endif
1058
1059
1060  /*
1061   *  prepare to get out of interrupt
1062   *  return from interrupt  (maybe to _ISR_Dispatch)
1063   *
1064   *  LABEL "exit interrupt (simple case):"
1065   *  prepare to get out of interrupt
1066   *  return from interrupt
1067   */
1068
1069_ISR_Handler_exit:
1070/*
1071** Skip the SR restore because its a global register. _CPU_Context_switch_restore
1072** adjusts it according to each task's configuration.  If we didn't dispatch, the
1073** SR value isn't changed, so all we need to do is return.
1074**
1075*/
1076        /* restore context from stack */
1077
1078#ifdef INSTRUMENT_EXECUTING_THREAD
1079        lw      t0,THREAD_EXECUTING
1080        NOP
1081        sw      t0, 0x8001FFFC
1082#endif
1083
1084        LDREG t8, R_MDLO*R_SZ(sp)
1085        LDREG t0, R_T0*R_SZ(sp)
1086        mtlo  t8
1087        LDREG t8, R_MDHI*R_SZ(sp)
1088        LDREG t1, R_T1*R_SZ(sp)
1089        mthi  t8
1090        LDREG t2, R_T2*R_SZ(sp)
1091        LDREG t3, R_T3*R_SZ(sp)
1092        LDREG t4, R_T4*R_SZ(sp)
1093        LDREG t5, R_T5*R_SZ(sp)
1094        LDREG t6, R_T6*R_SZ(sp)
1095        LDREG t7, R_T7*R_SZ(sp)
1096        LDREG t8, R_T8*R_SZ(sp)
1097        LDREG t9, R_T9*R_SZ(sp)
1098        LDREG gp, R_GP*R_SZ(sp)
1099        LDREG fp, R_FP*R_SZ(sp)
1100        LDREG ra, R_RA*R_SZ(sp)
1101        LDREG a0, R_A0*R_SZ(sp)
1102        LDREG a1, R_A1*R_SZ(sp)
1103        LDREG a2, R_A2*R_SZ(sp)
1104        LDREG a3, R_A3*R_SZ(sp)
1105        LDREG v1, R_V1*R_SZ(sp)
1106        LDREG v0, R_V0*R_SZ(sp)
1107
1108#if __mips == 1
1109        LDREG     k1, R_EPC*R_SZ(sp)
1110#endif
1111
1112        .set noat
1113        LDREG     AT, R_AT*R_SZ(sp)
1114        .set at
1115
1116        ADDIU     sp,sp,EXCP_STACK_SIZE
1117
1118#if (__mips == 3) || (__mips == 32)
1119        eret
1120#elif __mips == 1
1121        j         k1
1122        rfe
1123#endif
1124        NOP
1125
1126       .set    reorder
1127ENDFRAME(_ISR_Handler)
1128
1129
1130FRAME(mips_break,sp,0,ra)
1131        .set noreorder
1132        break   0x0     /* this statement must be first in this function, assumed so by mips-stub.c */
1133        NOP
1134        j       ra
1135        NOP
1136       .set    reorder
1137ENDFRAME(mips_break)
1138
Note: See TracBrowser for help on using the repository browser.