source: rtems/cpukit/score/cpu/mips/cpu_asm.S @ d19cce29

Last change on this file since d19cce29 was d19cce29, checked in by Sebastian Huber <sebastian.huber@…>, on 08/05/13 at 12:54:11

score: Per-CPU thread dispatch disable level

Use a per-CPU thread dispatch disable level. So instead of one global
thread dispatch disable level we now have one instance per processor.
This is a major performance improvement for SMP. On non-SMP
configurations this may simplify the interrupt entry/exit code.

The giant lock is still present, but it is now decoupled from the thread
dispatching in _Thread_Dispatch(), _Thread_Handler(),
_Thread_Restart_self() and the interrupt entry/exit. Access to the
giant lock is now available via _Giant_Acquire() and _Giant_Release().
The giant lock is still implicitly acquired via
_Thread_Dispatch_decrement_disable_level().

The giant lock is only acquired for high-level operations in interrupt
handlers (e.g. release of a semaphore, sending of an event).

As a side-effect this change fixes the lost thread dispatch necessary
indication bug in _Thread_Dispatch().

A per-CPU thread dispatch disable level greatly simplifies the SMP
support for the interrupt entry/exit code since no spin locks have to be
acquired in this area. It is only necessary to get the current
processor index and use this to calculate the address of the own per-CPU
control. This reduces the interrupt latency considerably.

All elements for the interrupt entry/exit code are now part of the
Per_CPU_Control structure: thread dispatch disable level, ISR nest level
and thread dispatch necessary. Nothing else is required (except CPU
port specific stuff like on SPARC).
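
A minimal C sketch of the pattern described above (illustrative only: the
real definitions live in <rtems/score/percpu.h>, and every name below is an
assumption rather than the actual RTEMS API):

#include <stdint.h>
#include <stdbool.h>

#define SKETCH_CPU_COUNT 4              /* placeholder configuration value */

typedef struct {
  uint32_t isr_nest_level;                /* per-processor _ISR_Nest_level  */
  uint32_t thread_dispatch_disable_level; /* was a single global counter    */
  bool     dispatch_necessary;            /* thread dispatch necessary flag */
} sketch_per_cpu_control;

static sketch_per_cpu_control sketch_per_cpu_table[ SKETCH_CPU_COUNT ];

/* Stand-in for however the port reads the current processor index. */
extern uint32_t sketch_current_processor( void );

/* Interrupt entry/exit only needs the processor index to locate its own
   control block; no spin lock is taken to maintain these counters. */
static inline sketch_per_cpu_control *sketch_per_cpu_self( void )
{
  return &sketch_per_cpu_table[ sketch_current_processor() ];
}

Interrupt entry then amounts to incrementing sketch_per_cpu_self()->isr_nest_level
and so on, which is what the assembly below does via the ISR_NEST_LEVEL and
THREAD_DISPATCH_DISABLE_LEVEL symbols.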

/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and added the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
 *          performance, tweaking this code and the isr vectoring routines
 *          to reduce overhead & latencies.  Added optional
 *          instrumentation as well.
 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
 *          and deferred FP contexts.
 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
 *          by increasing the amount of context saved/restored.
 *    2004: March 24, Art Ferrer, NASA/GSFC, added save of FP status/control
 *          register to fix intermittent FP error encountered on ST5 mission
 *          implementation on Mongoose V processor.
 *    2004: April 7, Greg Menke <gregory.menke@gsfc.nasa.gov> Added __mips==32
 *          support for R4000 processors running 32 bit code.  Fixed #define
 *          problems that caused fpu code to always be included even when no
 *          fpu is present.
 *
 *  COPYRIGHT (c) 1989-2002.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>
#include <rtems/mips/iregdef.h>
#include <rtems/mips/idtcpu.h>
#include <rtems/score/percpu.h>

#define ASSEMBLY_ONLY
#include <rtems/score/cpu.h>

#if TRUE
#else
#error TRUE is not true
#endif
#if FALSE
#error FALSE is not false
#else
#endif

/*
#if ( CPU_HARDWARE_FP == TRUE )
#warning CPU_HARDWARE_FP == TRUE
#else
#warning CPU_HARDWARE_FP != TRUE
#endif
*/


/* enable debugging shadow writes to misc ram, this is a vestigial
* Mongoose-ism debug tool - but may be handy in the future so we
* left it in...
*/

/* #define INSTRUMENT_ISR_VECTORING */
/* #define INSTRUMENT_EXECUTING_THREAD */



/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP     nop
#define ADD     dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0           /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
#define MTCO    dmtc0           /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
#define ADDU    addu
#define ADDIU   addiu
#if (__mips_fpr==32)
#define STREGC1 swc1
#define LDREGC1 lwc1
#elif (__mips_fpr==64)          /* Use these instructions if there are 64 bit floating point registers. This requires FR bit to be set in C0_SR */
#define STREGC1 sdc1
#define LDREGC1 ldc1
#endif
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif (__mips == 1 ) || (__mips == 32)
/* 32 bit register operations*/
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define STREGC1 swc1
#define LDREGC1 lwc1
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)
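/* EXCP_STACK_SIZE is the size of the full register frame built on the
   stack by _ISR_Handler; NREGS and the R_* frame indices used below come
   from <rtems/mips/iregdef.h>. */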


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31
#define FPCS_OFFSET 32


ASM_EXTERN(__exceptionStackFrame, SZ_INT)

/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */
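/* A typical caller passes the address of the executing thread's FP context
** pointer, e.g. (illustrative only, not the literal cpu.h code):
**
**   _CPU_Context_save_fp( &executing->fp_context );
*/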

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noreorder
        .set noat

        /*
        ** Make sure the FPU is on before we save state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        mfc0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
        li      t2,SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        mtc0    t0,C0_SR

        lw      a1,(a0)         /* get address of context storage area */
        move    t0,ra
        jal     _CPU_Context_save_fp_from_exception
        NOP

        /*
        ** Reassert the task's state because we've not saved it yet.
        */
        mtc0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        STREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        STREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        STREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        STREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        STREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        STREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        STREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        STREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        STREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        STREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        STREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        STREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        STREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        STREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        STREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        STREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        STREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        STREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        STREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        STREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        STREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        STREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        STREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        STREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        STREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        STREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        STREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        STREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        STREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        STREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        STREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        STREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                    /* Read FP status/control reg */
        cfc1 a0,$31                    /* Two reads clear pipeline */
        NOP
        NOP
        sw a0, FPCS_OFFSET*F_SZ(a1)    /* Store value to FPCS location */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        .set noreorder

        /*
        ** Make sure the FPU is on before we retrieve state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        mfc0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
        li      t2,SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        mtc0    t0,C0_SR

        lw      a1,(a0)         /* get address of context storage area */
        move    t0,ra
        jal     _CPU_Context_restore_fp_from_exception
        NOP

        /*
        ** Reassert the old task's state because we've not restored the
        ** new one yet.
        */
        mtc0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        LDREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        LDREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        LDREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        LDREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        LDREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        LDREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        LDREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        LDREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        LDREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        LDREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        LDREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        LDREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        LDREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        LDREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        LDREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        LDREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        LDREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        LDREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        LDREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        LDREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        LDREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        LDREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        LDREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        LDREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        LDREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        LDREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        LDREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        LDREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        LDREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        LDREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        LDREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        LDREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                  /* Read from FP status/control reg */
        cfc1 a0,$31                  /* Two reads clear pipeline */
        NOP                          /* NOPs ensure execution */
        NOP
        lw a0,FPCS_OFFSET*F_SZ(a1)   /* Load saved FPCS value */
        NOP
        ctc1 a0,$31                  /* Restore FPCS register */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */

FRAME(_CPU_Context_switch,sp,0,ra)
        .set noreorder

        mfc0    t0,C0_SR
#if (__mips == 3) || (__mips == 32)
        li      t1,SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        STREG   t0,C0_SR_OFFSET*R_SZ(a0)        /* save the task's SR */
        not     t1
        and     t0,t1                           /* mask off interrupts while we context switch */
        mtc0    t0,C0_SR
        NOP

        STREG ra,RA_OFFSET*R_SZ(a0)             /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)


        /*
        ** This code grabs the userspace EPC if we're dispatching from
        ** an interrupt frame or supplies the address of the dispatch
        ** routines if not.  This is entirely for the gdbstub's benefit so
        ** it can know where each task is running.
        **
        ** Its value is only set when calling _Thread_Dispatch from
        ** the interrupt handler and is cleared immediately when this
        ** routine gets it.
        */

        la      t0,__exceptionStackFrame        /* see if we're coming in from an exception */
        LDREG   t1, (t0)
        NOP
        beqz    t1,1f

        STREG   zero, (t0)                      /* and clear it */
        NOP
        LDREG   t0,R_EPC*R_SZ(t1)               /* get the userspace EPC from the frame */
        b       2f
        NOP

1:      la      t0,_Thread_Dispatch             /* if ==0, we're switched out */

2:      STREG   t0,C0_EPC_OFFSET*R_SZ(a0)


_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)             /* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

/*      NOP */
/*#if (__mips == 3) || (__mips == 32) */
/*        andi  t0,SR_EXL */
/*        bnez  t0,_CPU_Context_1 */   /* set exception level from restore context */
/*        li    t0,~SR_EXL */
/*        MFC0  t1,C0_SR */
/*        NOP */
/*        and   t1,t0 */
/*        MTC0  t1,C0_SR */
/* */
/*#elif __mips == 1 */
/* */
/*        andi  t0,(SR_INTERRUPT_ENABLE_BITS) */ /* we know 0 disabled */
/*        beq   t0,$0,_CPU_Context_1  */         /* set level from restore context */
/*        MFC0  t0,C0_SR */
/*        NOP */
/*        or    t0,(SR_INTERRUPT_ENABLE_BITS) */ /* new_sr = old sr with enabled  */
/*        MTC0  t0,C0_SR */                     /* set with enabled */
/*        NOP */


/*
** Incorporate the incoming task's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump thru the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the task's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a task
** must itself manipulate the SR register.
**
** Although something of a hack on this processor, we treat the SR register
** int enables as the RTEMS interrupt level.  We use the int level
** value as a bitmask, not as any sort of greater than/less than metric.
** Manipulation of a task's interrupt level corresponds directly to manipulation
** of that task's SR bits, as seen in cpu.c
**
** Note, interrupts are disabled before context is saved, though the task's
** interrupt enable state is recorded.  The task swapping in will apply its
** specific SR bits, including interrupt enable.  If further task-specific
** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
** cpu.h task initialization code that will be affected.
*/
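/*
** In short: with mask = SR_CU1 | SR_IMASK plus the int enable bits
** (SR_IEC/SR_IEP/SR_IEO on MIPS1, SR_IE on MIPS3/32), the code below
** computes  new SR = (current SR & ~mask) | (task SR & mask).
*/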

        li      t2,SR_CU1
        or      t2,SR_IMASK

        /* int enable bits */
#if (__mips == 3) || (__mips == 32)
        /*
        ** Save IE
        */
        or      t2,SR_IE
#elif __mips == 1
        /*
        ** Save current, previous & old int enables.  This is key because
        ** we can dispatch from within the stack frame used by an
        ** interrupt service.  The int enables nest, but not beyond
        ** previous and old because of the dispatch interlock seen
        ** in the interrupt processing code.
        */
        or      t2,SR_IEC + SR_IEP + SR_IEO
#endif
        and     t0,t2           /* keep only the per-task bits */

        mfc0    t1,C0_SR        /* grab the current SR */
        not     t2
        and     t1,t2           /* mask off the old task's per-task bits */
        or      t1,t0           /* or in the new task's bits */
        mtc0    t1,C0_SR        /* and load the new SR */
        NOP

/* _CPU_Context_1: */
        j       ra
        NOP
ENDFRAME(_CPU_Context_switch)


/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        .set noreorder
        move    a1,a0
        j       _CPU_Context_switch_restore
        NOP

ENDFRAME(_CPU_Context_restore)

.extern _Thread_Dispatch
.extern _ISR_Vector_table

/*  void _DBG_Handler()
 *
 *  This routine services the (at least) MIPS1 debug vector,
 *  only used by the hardware debugging features.  This code,
 *  while optional, is best located here because it's intrinsically
 *  associated with exceptions in general & thus tied pretty
 *  closely to _ISR_Handler.
 */
FRAME(_DBG_Handler,sp,0,ra)
        .set noreorder
        la      k0,_ISR_Handler
        j       k0
        NOP
        .set reorder
ENDFRAME(_DBG_Handler)

/*  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler.
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branches to _ISR_Handler.
 *
 */

FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU    sp,sp,-EXCP_STACK_SIZE

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
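        /* The HI/LO reads below are interleaved with the register stores,
           presumably to cover the mflo/mfhi result latency on older MIPS
           pipelines. */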
        mflo  t0
        STREG t8, R_T8*R_SZ(sp)
        STREG t0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  t0
        STREG gp, R_GP*R_SZ(sp)
        STREG t0, R_MDHI*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)

        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        mfc0     t0,C0_SR
        MFCO     t1,C0_EPC
        STREG    t0,R_SR*R_SZ(sp)
        STREG    t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw t2, THREAD_EXECUTING
        NOP
        sw t2, 0x8001FFF0
#endif

        /* determine if an interrupt generated this exception */

        mfc0     t0,C0_CAUSE
        NOP

        and      t1,t0,CAUSE_EXCMASK
        beq      t1, 0, _ISR_Handler_1

_ISR_Handler_Exception:

        /*  If we return from the exception, it is assumed nothing
         *  bad is going on and we can continue to run normally.
         *  But we want to save the entire CPU context so exception
         *  handlers can look at it and change it.
         *
         *  NOTE: This is the path the debugger stub will take.
         */

        /* already got t0 = cause in the interrupt test above */
        STREG    t0,R_CAUSE*R_SZ(sp)

        STREG    sp, R_SP*R_SZ(sp)

        STREG    s0,R_S0*R_SZ(sp)     /* save s0 - s7 */
        STREG    s1,R_S1*R_SZ(sp)
        STREG    s2,R_S2*R_SZ(sp)
        STREG    s3,R_S3*R_SZ(sp)
        STREG    s4,R_S4*R_SZ(sp)
        STREG    s5,R_S5*R_SZ(sp)
        STREG    s6,R_S6*R_SZ(sp)
        STREG    s7,R_S7*R_SZ(sp)

        /* CP0 special registers */

#if __mips == 1
        mfc0     t0,C0_TAR
#endif
        MFCO     t1,C0_BADVADDR

#if __mips == 1
        STREG    t0,R_TAR*R_SZ(sp)
#else
        NOP
#endif
        STREG    t1,R_BADVADDR*R_SZ(sp)

#if ( CPU_HARDWARE_FP == TRUE )
        mfc0     t0,C0_SR                 /* FPU is enabled, save state */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 1f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_save_fp_from_exception
        NOP
        mfc1     t0,C1_REVISION
        mfc1     t1,C1_STATUS
        STREG    t0,R_FEIR*R_SZ(sp)
        STREG    t1,R_FCSR*R_SZ(sp)

1:
#endif

        move     a0,sp
        jal      mips_vector_exceptions
        NOP


        /*
        ** Note, if the exception vector returns, rely on it to have
        ** adjusted EPC so we will return to some correct address.  If
        ** this is not done, we might get stuck in an infinite loop because
        ** we'll return to the instruction where the exception occurred and
        ** it could throw again.
        **
        ** It is expected the only code using the exception processing is
        ** either the gdb stub or some user code which is either going to
        ** panic or do something useful.  Regardless, it is up to each
        ** exception routine to properly adjust EPC, so the code below
        ** may be helpful for doing just that.
        */

/* *********************************************************************
** this code follows the R3000's exception return logic, but is not
** needed because the gdb stub does it for us.  It might be useful
** for something else at some point...
**
        * compute the address of the instruction we'll return to *

        LDREG   t1, R_CAUSE*R_SZ(sp)
        LDREG   t0, R_EPC*R_SZ(sp)

        * first see if the exception happened in the delay slot *
        li      t3,CAUSE_BD
        AND     t4,t1,t3
        beqz    t4,excnodelay
        NOP

        * it did, now see if the branch occurred or not *
        li      t3,CAUSE_BT
        AND     t4,t1,t3
        beqz    t4,excnobranch
        NOP

        * branch was taken, we resume at the branch target *
        LDREG   t0, R_TAR*R_SZ(sp)
        j       excreturn
        NOP

excnobranch:
        ADDU    t0,R_SZ

excnodelay:
        ADDU    t0,R_SZ

excreturn:
        STREG   t0, R_EPC*R_SZ(sp)
        NOP
********************************************************************* */


 /* if we're returning into mips_break, move to the next instruction */

        LDREG   t0,R_EPC*R_SZ(sp)
        la      t1,mips_break
        xor     t2,t0,t1
        bnez    t2,3f

        addu    t0,R_SZ
        STREG   t0,R_EPC*R_SZ(sp)
        NOP
3:




#if ( CPU_HARDWARE_FP == TRUE )
        mfc0     t0,C0_SR               /* FPU is enabled, restore state */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 2f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_restore_fp_from_exception
        NOP
        LDREG    t0,R_FEIR*R_SZ(sp)
        LDREG    t1,R_FCSR*R_SZ(sp)
        mtc1     t0,C1_REVISION
        mtc1     t1,C1_STATUS
2:
#endif
        LDREG    s0,R_S0*R_SZ(sp)    /* restore s0 - s7 */
        LDREG    s1,R_S1*R_SZ(sp)
        LDREG    s2,R_S2*R_SZ(sp)
        LDREG    s3,R_S3*R_SZ(sp)
        LDREG    s4,R_S4*R_SZ(sp)
        LDREG    s5,R_S5*R_SZ(sp)
        LDREG    s6,R_S6*R_SZ(sp)
        LDREG    s7,R_S7*R_SZ(sp)

        /* do NOT restore the sp as this could mess up the world */
        /* do NOT restore the cause as this could mess up the world */

        /*
        ** Jump all the way out.  If there's a pending interrupt, just
        ** let it be serviced later.  Since we're probably using the
        ** gdb stub, we've already disrupted the ISR service timing
        ** anyhow.  We oughtn't mix exception and interrupt processing
        ** in the same exception call in case the exception stuff
        ** might interfere with the dispatching & timer ticks.
        */
        j        _ISR_Handler_exit
        NOP

_ISR_Handler_1:

        mfc0     t1,C0_SR
        and      t0,CAUSE_IPMASK
        and      t0,t1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* then where did it come from??? */

        beq      t0,zero,_ISR_Handler_exit
        NOP


  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */


  /*
   *  _ISR_Nest_level++;
   */
        lw      t0,ISR_NEST_LEVEL
        NOP
        add     t0,t0,1
        sw      t0,ISR_NEST_LEVEL
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        lw      t1,THREAD_DISPATCH_DISABLE_LEVEL
        NOP
        add     t1,t1,1
        sw      t1,THREAD_DISPATCH_DISABLE_LEVEL

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

#ifdef INSTRUMENT_ISR_VECTORING
        NOP
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move     a0,sp
        jal      mips_vector_isr_handlers
        NOP

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        NOP
#endif

  /*
   *  --_ISR_Nest_level;
   */
        lw      t2,ISR_NEST_LEVEL
        NOP
        add     t2,t2,-1
        sw      t2,ISR_NEST_LEVEL
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        lw      t1,THREAD_DISPATCH_DISABLE_LEVEL
        NOP
        add     t1,t1,-1
        sw      t1,THREAD_DISPATCH_DISABLE_LEVEL
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        NOP


  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if !_Thread_Dispatch_necessary
   *    goto the label "exit interrupt (simple case)"
   */
        lbu     t0,DISPATCH_NEEDED
        NOP
        or      t0,t0,t0
        beq     t0,zero,_ISR_Handler_exit
        NOP



#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,THREAD_EXECUTING
        NOP
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering Thread_Dispatch which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the Thread_Dispatch_disable_level interlock
** that prevents recursive entry into Thread_Dispatch.
*/

        mfc0    t0, C0_SR
#if __mips == 1

        li      t1,SR_IEC
        or      t0, t1

#elif (__mips == 3) || (__mips == 32)

        /*
        ** clear EXL and set IE so we can get interrupts.
        */
        li      t1, SR_EXL
        not     t1
        and     t0,t1
        or      t0, SR_IE

#endif
        mtc0    t0, C0_SR
        NOP

        /* save off our stack frame so the context switcher can get to it */
        la      t0,__exceptionStackFrame
        STREG   sp,(t0)

        jal     _Thread_Dispatch
        NOP

        /*
        ** And make sure it's clear in case we didn't dispatch.  If we did,
        ** it's already cleared.
        */
        la      t0,__exceptionStackFrame
        STREG   zero,(t0)
        NOP

/*
** turn interrupts back off while we restore context so
** a badly timed interrupt won't mess things up
*/
        mfc0    t0, C0_SR

#if __mips == 1

        /* ints off, current & prev kernel mode on (kernel mode enabled is bit clear..argh!) */
        li      t1,SR_IEC | SR_KUP | SR_KUC
        not     t1
        and     t0, t1
        mtc0    t0, C0_SR
        NOP

#elif (__mips == 3) || (__mips == 32)

        /* make sure EXL and IE are set so ints are disabled & we can update EPC for the return */
        li   t1,SR_IE           /* Clear IE first (recommended) */
        not  t1
        and  t0,t1
        mtc0 t0,C0_SR
        NOP

        /* apply task's SR with EXL set so the eret will return properly */
        or      t0, SR_EXL | SR_IE
        mtc0    t0, C0_SR
        NOP

        /* store the new EPC value, which is safe now that EXL is set */
        LDREG   t0, R_EPC*R_SZ(sp)
        NOP
        MTCO    t0, C0_EPC
        NOP

#endif


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,THREAD_EXECUTING
        NOP
        sw      t0,0x8001FFF8
#endif


  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register. _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changed, so all we need to do is return.
**
*/
        /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,THREAD_EXECUTING
        NOP
        sw      t0, 0x8001FFFC
#endif

        LDREG t8, R_MDLO*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        mtlo  t8
        LDREG t8, R_MDHI*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        mthi  t8
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)

#if __mips == 1
        LDREG     k1, R_EPC*R_SZ(sp)
#endif

        .set noat
        LDREG     AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE

#if (__mips == 3) || (__mips == 32)
        eret
#elif __mips == 1
        j         k1
        rfe
#endif
        NOP

       .set    reorder
ENDFRAME(_ISR_Handler)


FRAME(mips_break,sp,0,ra)
        .set noreorder
        break   0x0     /* this statement must be first in this function, assumed so by mips-stub.c */
        NOP
        j       ra
        NOP
       .set    reorder
ENDFRAME(mips_break)