/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996: Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *            permission to use, copy, modify, and distribute this file
 *            for any purpose is hereby granted without fee, provided that
 *            the above copyright notice and this notice appears in all
 *            copies, and that the name of Transition Networks not be used in
 *            advertising or publicity pertaining to distribution of the
 *            software without specific, written prior permission.  Transition
 *            Networks makes no representations about the suitability
 *            of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and adding the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov> bench tested ISR
 *          performance, tweaking this code and the ISR vectoring routines
 *          to reduce overhead & latencies.  Added optional instrumentation
 *          as well.
 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov> overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs. integer-only tasks, interrupt
 *          levels and deferred FP contexts.
 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
 *          by increasing the amount of context saved/restored.
 *    2004: March 24, Art Ferrer, NASA/GSFC, added save of FP status/control
 *          register to fix an intermittent FP error encountered on the ST5
 *          mission implementation on the Mongoose V processor.
 *    2004: April 7, Greg Menke <gregory.menke@gsfc.nasa.gov> added __mips==32
 *          support for R4000 processors running 32-bit code.  Fixed #define
 *          problems that caused FPU code to always be included even when no
 *          FPU is present.
 *
 *  COPYRIGHT (c) 1989-2002.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>
#include <rtems/mips/iregdef.h>
#include <rtems/mips/idtcpu.h>
#include <rtems/score/percpu.h>

#define ASSEMBLY_ONLY
#include <rtems/score/cpu.h>

#if TRUE
#else
#error TRUE is not true
#endif
#if FALSE
#error FALSE is not false
#else
#endif

/*
#if ( CPU_HARDWARE_FP == TRUE )
#warning CPU_HARDWARE_FP == TRUE
#else
#warning CPU_HARDWARE_FP != TRUE
#endif
*/

/* Enable debugging shadow writes to misc RAM.  This is a vestigial
 * Mongoose-ism debug tool, but it may be handy in the future so we
 * left it in...
 */

/* #define INSTRUMENT_ISR_VECTORING */
/* #define INSTRUMENT_EXECUTING_THREAD */


/* Ifdefs prevent the duplication of code for MIPS ISA Level 3 (R4xxx)
 * and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP             nop
#define ADD             dadd
#define STREG           sd
#define LDREG           ld
#define MFCO            dmfc0   /* Only use this op for coprocessor registers that are 64 bit in the R4000 architecture */
#define MTCO            dmtc0   /* Only use this op for coprocessor registers that are 64 bit in the R4000 architecture */
#define ADDU            addu
#define ADDIU           addiu
#if (__mips_fpr==32)
#define STREGC1         swc1
#define LDREGC1         lwc1
#elif (__mips_fpr==64)          /* Use these instructions if there are 64 bit floating point registers.  This requires the FR bit to be set in C0_SR. */
#define STREGC1         sdc1
#define LDREGC1         ldc1
#endif
#define R_SZ            8
#define F_SZ            8
#define SZ_INT          8
#define SZ_INT_POW2     3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif (__mips == 1 ) || (__mips == 32)
/* 32 bit register operations */
#define NOP             nop
#define ADD             add
#define STREG           sw
#define LDREG           lw
#define MFCO            mfc0
#define MTCO            mtc0
#define ADDU            add
#define ADDIU           addi
#define STREGC1         swc1
#define LDREGC1         lwc1
#define R_SZ            4
#define F_SZ            4
#define SZ_INT          4
#define SZ_INT_POW2     2
#else
#error "mips assembly: what size registers do I deal with?"
#endif
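
/* With these macros the context code below is written once for both
 * register widths.  For example,
 *
 *     STREG s0,S0_OFFSET*R_SZ(a0)
 *
 * assembles to "sd s0,0(a0)" when __mips == 3 and to "sw s0,0(a0)"
 * when __mips == 1 or __mips == 32.
 */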


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)
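
/* NREGS and the R_* frame indices used throughout come from
 * <rtems/mips/iregdef.h>; the exception stack frame is simply NREGS
 * slots of R_SZ bytes each, one per register in that layout.
 */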


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET       0
#define S1_OFFSET       1
#define S2_OFFSET       2
#define S3_OFFSET       3
#define S4_OFFSET       4
#define S5_OFFSET       5
#define S6_OFFSET       6
#define S7_OFFSET       7
#define SP_OFFSET       8
#define FP_OFFSET       9
#define RA_OFFSET       10
#define C0_SR_OFFSET    11
#define C0_EPC_OFFSET   12
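
/* In C terms the integer context indexed by these offsets looks
 * roughly like the sketch below; the authoritative definition is the
 * Context_Control structure in cpu.h and the field names here are
 * illustrative only.
 *
 *   typedef struct {
 *     reg_t s0, s1, s2, s3, s4, s5, s6, s7;   (callee-saved registers)
 *     reg_t sp, fp, ra;                       (stack, frame, return)
 *     reg_t c0_sr, c0_epc;                    (CP0 status and PC)
 *   } Context_Control;
 *
 * Each field is R_SZ bytes wide, so field N lives at N_OFFSET*R_SZ.
 */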

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET      0
#define FP1_OFFSET      1
#define FP2_OFFSET      2
#define FP3_OFFSET      3
#define FP4_OFFSET      4
#define FP5_OFFSET      5
#define FP6_OFFSET      6
#define FP7_OFFSET      7
#define FP8_OFFSET      8
#define FP9_OFFSET      9
#define FP10_OFFSET     10
#define FP11_OFFSET     11
#define FP12_OFFSET     12
#define FP13_OFFSET     13
#define FP14_OFFSET     14
#define FP15_OFFSET     15
#define FP16_OFFSET     16
#define FP17_OFFSET     17
#define FP18_OFFSET     18
#define FP19_OFFSET     19
#define FP20_OFFSET     20
#define FP21_OFFSET     21
#define FP22_OFFSET     22
#define FP23_OFFSET     23
#define FP24_OFFSET     24
#define FP25_OFFSET     25
#define FP26_OFFSET     26
#define FP27_OFFSET     27
#define FP28_OFFSET     28
#define FP29_OFFSET     29
#define FP30_OFFSET     30
#define FP31_OFFSET     31
#define FPCS_OFFSET     32
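
/* The FP context is correspondingly the 32 floating point registers
 * followed by the FP status/control word (FCSR, accessed via cfc1/ctc1
 * $31), each slot F_SZ bytes wide; see Context_Control_fp in cpu.h for
 * the real definition.
 */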


ASM_EXTERN(__exceptionStackFrame, SZ_INT)

/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noreorder
        .set noat

/*
** Make sure the FPU is on before we save state.  This code
** is here because the FPU context switch might occur when an
** integer task is switching out and an FP task is switching in.
*/
        mfc0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2                   /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
        li      t2,SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2                   /* turn off interrupts */
        mtc0    t0,C0_SR

        lw      a1,(a0)                 /* get address of context storage area */
        move    t0,ra
        jal     _CPU_Context_save_fp_from_exception
        NOP

/*
** Reassert the task's state because we've not saved it yet.
*/
        mtc0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        STREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        STREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        STREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        STREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        STREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        STREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        STREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        STREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        STREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        STREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        STREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        STREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        STREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        STREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        STREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        STREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        STREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        STREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        STREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        STREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        STREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        STREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        STREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        STREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        STREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        STREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        STREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        STREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        STREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        STREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        STREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        STREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1    a0,$31                  /* Read FP status/control reg */
        cfc1    a0,$31                  /* Two reads clear pipeline */
        NOP
        NOP
        sw      a0,FPCS_OFFSET*F_SZ(a1) /* Store value to FPCS location */
        NOP
        j       ra
        NOP
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif
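
/* From C the routine receives the address of the thread's FP context
 * pointer, e.g. (illustrative sketch; the actual field name lives in
 * the score's Thread_Control):
 *
 *   _CPU_Context_save_fp( &executing->fp_context );
 *
 * which is why the "lw a1,(a0)" above dereferences one level to reach
 * the save area.
 */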

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        .set noreorder

/*
** Make sure the FPU is on before we retrieve state.  This code
** is here because the FPU context switch might occur when an
** integer task is switching out and an FP task is switching in.
*/
        mfc0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2                   /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
        li      t2,SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2                   /* turn off interrupts */
        mtc0    t0,C0_SR

        lw      a1,(a0)                 /* get address of context storage area */
        move    t0,ra
        jal     _CPU_Context_restore_fp_from_exception
        NOP

/*
** Reassert the old task's state because we've not restored the
** new one yet.
*/
        mtc0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        LDREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        LDREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        LDREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        LDREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        LDREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        LDREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        LDREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        LDREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        LDREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        LDREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        LDREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        LDREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        LDREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        LDREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        LDREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        LDREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        LDREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        LDREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        LDREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        LDREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        LDREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        LDREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        LDREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        LDREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        LDREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        LDREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        LDREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        LDREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        LDREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        LDREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        LDREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        LDREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1    a0,$31                  /* Read from FP status/control reg */
        cfc1    a0,$31                  /* Two reads clear pipeline */
        NOP                             /* NOPs ensure execution */
        NOP
        lw      a0,FPCS_OFFSET*F_SZ(a1) /* Load saved FPCS value */
        NOP
        ctc1    a0,$31                  /* Restore FPCS register */
        NOP
        j       ra
        NOP
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/* _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control *run,
 *   Context_Control *heir
 * )
 */
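
/* Per the MIPS calling convention, a0 points at the outgoing task's
 * Context_Control (stored below) and a1 at the heir's (loaded from
 * _CPU_Context_switch_restore onward).
 */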

FRAME(_CPU_Context_switch,sp,0,ra)
        .set noreorder

        mfc0    t0,C0_SR
#if (__mips == 3) || (__mips == 32)
        li      t1,SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        STREG   t0,C0_SR_OFFSET*R_SZ(a0)        /* save the task's SR */
        not     t1
        and     t0,t1                   /* mask off interrupts while we context switch */
        mtc0    t0,C0_SR
        NOP

        STREG   ra,RA_OFFSET*R_SZ(a0)   /* save current context */
        STREG   sp,SP_OFFSET*R_SZ(a0)
        STREG   fp,FP_OFFSET*R_SZ(a0)
        STREG   s0,S0_OFFSET*R_SZ(a0)
        STREG   s1,S1_OFFSET*R_SZ(a0)
        STREG   s2,S2_OFFSET*R_SZ(a0)
        STREG   s3,S3_OFFSET*R_SZ(a0)
        STREG   s4,S4_OFFSET*R_SZ(a0)
        STREG   s5,S5_OFFSET*R_SZ(a0)
        STREG   s6,S6_OFFSET*R_SZ(a0)
        STREG   s7,S7_OFFSET*R_SZ(a0)


/*
** This code grabs the userspace EPC if we're dispatching from
** an interrupt frame, or supplies the address of the dispatch
** routine if not.  This is entirely for the gdb stub's benefit so
** it can know where each task is running.
**
** Its value is only set when calling _Thread_Dispatch from
** the interrupt handler and is cleared immediately when this
** routine gets it.
*/

        la      t0,__exceptionStackFrame  /* see if we're coming in from an exception */
        LDREG   t1, (t0)
        NOP
        beqz    t1,1f
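        /* NB: with .set noreorder in force, the STREG below sits in
        ** the beqz delay slot and executes on both paths; storing
        ** zero is harmless when the saved frame pointer was already
        ** zero.
        */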

        STREG   zero, (t0)              /* and clear it */
        NOP
        LDREG   t0,R_EPC*R_SZ(t1)       /* get the userspace EPC from the frame */
        b       2f
        NOP

1:      la      t0,_Thread_Dispatch     /* if ==0, we're switched out */

2:      STREG   t0,C0_EPC_OFFSET*R_SZ(a0)


_CPU_Context_switch_restore:
        LDREG   ra,RA_OFFSET*R_SZ(a1)   /* restore context */
        LDREG   sp,SP_OFFSET*R_SZ(a1)
        LDREG   fp,FP_OFFSET*R_SZ(a1)
        LDREG   s0,S0_OFFSET*R_SZ(a1)
        LDREG   s1,S1_OFFSET*R_SZ(a1)
        LDREG   s2,S2_OFFSET*R_SZ(a1)
        LDREG   s3,S3_OFFSET*R_SZ(a1)
        LDREG   s4,S4_OFFSET*R_SZ(a1)
        LDREG   s5,S5_OFFSET*R_SZ(a1)
        LDREG   s6,S6_OFFSET*R_SZ(a1)
        LDREG   s7,S7_OFFSET*R_SZ(a1)

        LDREG   t0, C0_SR_OFFSET*R_SZ(a1)

/*      NOP */
/*#if (__mips == 3) || (__mips == 32) */
/*      andi    t0,SR_EXL */
/*      bnez    t0,_CPU_Context_1 */    /* set exception level from restore context */
/*      li      t0,~SR_EXL */
/*      MFC0    t1,C0_SR */
/*      NOP */
/*      and     t1,t0 */
/*      MTC0    t1,C0_SR */
/* */
/*#elif __mips == 1 */
/* */
/*      andi    t0,(SR_INTERRUPT_ENABLE_BITS) */ /* we know 0 disabled */
/*      beq     t0,$0,_CPU_Context_1 */          /* set level from restore context */
/*      MFC0    t0,C0_SR */
/*      NOP */
/*      or      t0,(SR_INTERRUPT_ENABLE_BITS) */ /* new_sr = old sr with enabled */
/*      MTC0    t0,C0_SR */                      /* set with enabled */
/*      NOP */


/*
** Incorporate the incoming task's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump through the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the task's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a task
** must itself manipulate the SR register.
**
** Although something of a hack on this processor, we treat the SR register
** int enables as the RTEMS interrupt level.  We use the int level
** value as a bitmask, not as any sort of greater-than/less-than metric.
** Manipulation of a task's interrupt level corresponds directly to manipulation
** of that task's SR bits, as seen in cpu.c.
**
** Note, interrupts are disabled before context is saved, though the task's
** interrupt enable state is recorded.  The task swapping in will apply its
** specific SR bits, including interrupt enable.  If further task-specific
** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
** the cpu.h task initialization code that will be affected.
*/
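
/* In C terms the merge performed below is (sketch):
 *
 *   mask   = SR_CU1 | SR_IMASK | <int enable bit(s)>;
 *   new_sr = (current_sr & ~mask) | (task_sr & mask);
 *
 * i.e. only the FPU enable, interrupt mask and interrupt enable bits
 * come from the task; every other SR bit stays global.
 */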

        li      t2,SR_CU1
        or      t2,SR_IMASK

/* int enable bits */
#if (__mips == 3) || (__mips == 32)
/*
** Save IE
*/
        or      t2,SR_IE
#elif __mips == 1
/*
** Save current, previous & old int enables.  This is key because
** we can dispatch from within the stack frame used by an
** interrupt service.  The int enables nest, but not beyond
** previous and old because of the dispatch interlock seen
** in the interrupt processing code.
*/
        or      t2,SR_IEC + SR_IEP + SR_IEO
#endif
        and     t0,t2                   /* keep only the per-task bits */

        mfc0    t1,C0_SR                /* grab the current SR */
        not     t2
        and     t1,t2                   /* mask off the old task's per-task bits */
        or      t1,t0                   /* or in the new task's bits */
        mtc0    t1,C0_SR                /* and load the new SR */
        NOP

/* _CPU_Context_1: */
        j       ra
        NOP
ENDFRAME(_CPU_Context_switch)


/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        .set noreorder
        move    a1,a0
        j       _CPU_Context_switch_restore
        NOP

ENDFRAME(_CPU_Context_restore)

        .extern _Thread_Dispatch

/* void _DBG_Handler()
 *
 *  This routine services the (at least) MIPS1 debug vector,
 *  only used by the hardware debugging features.  This code,
 *  while optional, is best located here because it's intrinsically
 *  associated with exceptions in general & thus tied pretty
 *  closely to _ISR_Handler.
 */
FRAME(_DBG_Handler,sp,0,ra)
        .set noreorder
        la      k0,_ISR_Handler
        j       k0
        NOP
        .set reorder
ENDFRAME(_DBG_Handler)

/* void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler while requirements for setting the RTEMS state
 *  variables define a specific stopping point.
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branches to _ISR_Handler.
 */
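
/* Outline of the handler below (a sketch of the code that follows):
 *
 *   save caller-saved registers, SR and EPC on the stack
 *   if CAUSE.EXC != 0:                  (a true exception)
 *     save the remaining context and call mips_vector_exceptions
 *     restore and leave via _ISR_Handler_exit
 *   else:                               (an interrupt)
 *     _ISR_Nest_level++; _Thread_Dispatch_disable_level++
 *     call mips_vector_isr_handlers with the frame
 *     drop both levels; if a dispatch is needed and both are zero,
 *       call _Thread_Dispatch with interrupts enabled
 *   restore context and return via eret (or j/rfe on MIPS1)
 */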

FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

/* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

/* wastes a lot of stack space for context?? */
        ADDIU   sp,sp,-EXCP_STACK_SIZE

        STREG   ra, R_RA*R_SZ(sp)       /* store ra on the stack */
        STREG   v0, R_V0*R_SZ(sp)
        STREG   v1, R_V1*R_SZ(sp)
        STREG   a0, R_A0*R_SZ(sp)
        STREG   a1, R_A1*R_SZ(sp)
        STREG   a2, R_A2*R_SZ(sp)
        STREG   a3, R_A3*R_SZ(sp)
        STREG   t0, R_T0*R_SZ(sp)
        STREG   t1, R_T1*R_SZ(sp)
        STREG   t2, R_T2*R_SZ(sp)
        STREG   t3, R_T3*R_SZ(sp)
        STREG   t4, R_T4*R_SZ(sp)
        STREG   t5, R_T5*R_SZ(sp)
        STREG   t6, R_T6*R_SZ(sp)
        STREG   t7, R_T7*R_SZ(sp)
        mflo    t0
        STREG   t8, R_T8*R_SZ(sp)
        STREG   t0, R_MDLO*R_SZ(sp)
        STREG   t9, R_T9*R_SZ(sp)
        mfhi    t0
        STREG   gp, R_GP*R_SZ(sp)
        STREG   t0, R_MDHI*R_SZ(sp)
        STREG   fp, R_FP*R_SZ(sp)

        .set noat
        STREG   AT, R_AT*R_SZ(sp)
        .set at

        mfc0    t0,C0_SR
        MFCO    t1,C0_EPC
        STREG   t0,R_SR*R_SZ(sp)
        STREG   t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t2, THREAD_EXECUTING
        NOP
        sw      t2, 0x8001FFF0
#endif

/* determine if an interrupt generated this exception */

        mfc0    t0,C0_CAUSE
        NOP

        and     t1,t0,CAUSE_EXCMASK
        beq     t1, 0, _ISR_Handler_1

_ISR_Handler_Exception:

/* If we return from the exception, it is assumed nothing
 * bad is going on and we can continue to run normally.
 * But we want to save the entire CPU context so exception
 * handlers can look at it and change it.
 *
 * NOTE: This is the path the debugger stub will take.
 */

/* already got t0 = cause in the interrupt test above */
        STREG   t0,R_CAUSE*R_SZ(sp)

        STREG   sp, R_SP*R_SZ(sp)

        STREG   s0,R_S0*R_SZ(sp)        /* save s0 - s7 */
        STREG   s1,R_S1*R_SZ(sp)
        STREG   s2,R_S2*R_SZ(sp)
        STREG   s3,R_S3*R_SZ(sp)
        STREG   s4,R_S4*R_SZ(sp)
        STREG   s5,R_S5*R_SZ(sp)
        STREG   s6,R_S6*R_SZ(sp)
        STREG   s7,R_S7*R_SZ(sp)

/* CP0 special registers */

#if __mips == 1
        mfc0    t0,C0_TAR
#endif
        MFCO    t1,C0_BADVADDR

#if __mips == 1
        STREG   t0,R_TAR*R_SZ(sp)
#else
        NOP
#endif
        STREG   t1,R_BADVADDR*R_SZ(sp)

#if ( CPU_HARDWARE_FP == TRUE )
        mfc0    t0,C0_SR                /* FPU enabled?  If so, save its state */
        NOP
        srl     t0,t0,16
        andi    t0,t0,(SR_CU1 >> 16)
        beqz    t0, 1f
        NOP

        la      a1,R_F0*R_SZ(sp)
        jal     _CPU_Context_save_fp_from_exception
        NOP
        mfc1    t0,C1_REVISION
        mfc1    t1,C1_STATUS
        STREG   t0,R_FEIR*R_SZ(sp)
        STREG   t1,R_FCSR*R_SZ(sp)

1:
#endif

        move    a0,sp
        jal     mips_vector_exceptions
        NOP


/*
** Note, if the exception vector returns, rely on it to have
** adjusted EPC so we will return to some correct address.  If
** this is not done, we might get stuck in an infinite loop because
** we'll return to the instruction where the exception occurred and
** it could throw again.
**
** It is expected the only code using the exception processing is
** either the gdb stub or some user code which is either going to
** panic or do something useful.  Regardless, it is up to each
** exception routine to properly adjust EPC, so the code below
** may be helpful for doing just that.
*/

/* *********************************************************************
** this code follows the R3000's exception return logic, but is not
** needed because the gdb stub does it for us.  It might be useful
** for something else at some point...
**
*       compute the address of the instruction we'll return to *

        LDREG   t1, R_CAUSE*R_SZ(sp)
        LDREG   t0, R_EPC*R_SZ(sp)

*       first see if the exception happened in the delay slot *
        li      t3,CAUSE_BD
        AND     t4,t1,t3
        beqz    t4,excnodelay
        NOP

*       it did, now see if the branch occurred or not *
        li      t3,CAUSE_BT
        AND     t4,t1,t3
        beqz    t4,excnobranch
        NOP

*       branch was taken, we resume at the branch target *
        LDREG   t0, R_TAR*R_SZ(sp)
        j       excreturn
        NOP

excnobranch:
        ADDU    t0,R_SZ

excnodelay:
        ADDU    t0,R_SZ

excreturn:
        STREG   t0, R_EPC*R_SZ(sp)
        NOP
********************************************************************* */

/* if we're returning into mips_break, move to the next instruction */

        LDREG   t0,R_EPC*R_SZ(sp)
        la      t1,mips_break
        xor     t2,t0,t1
        bnez    t2,3f

        addu    t0,R_SZ
        STREG   t0,R_EPC*R_SZ(sp)
        NOP
3:


#if ( CPU_HARDWARE_FP == TRUE )
        mfc0    t0,C0_SR                /* FPU enabled?  If so, restore its state */
        NOP
        srl     t0,t0,16
        andi    t0,t0,(SR_CU1 >> 16)
        beqz    t0, 2f
        NOP

        la      a1,R_F0*R_SZ(sp)
        jal     _CPU_Context_restore_fp_from_exception
        NOP
        LDREG   t0,R_FEIR*R_SZ(sp)
        LDREG   t1,R_FCSR*R_SZ(sp)
        mtc1    t0,C1_REVISION
        mtc1    t1,C1_STATUS
2:
#endif
        LDREG   s0,R_S0*R_SZ(sp)        /* restore s0 - s7 */
        LDREG   s1,R_S1*R_SZ(sp)
        LDREG   s2,R_S2*R_SZ(sp)
        LDREG   s3,R_S3*R_SZ(sp)
        LDREG   s4,R_S4*R_SZ(sp)
        LDREG   s5,R_S5*R_SZ(sp)
        LDREG   s6,R_S6*R_SZ(sp)
        LDREG   s7,R_S7*R_SZ(sp)

/* do NOT restore the sp as this could mess up the world */
/* do NOT restore the cause as this could mess up the world */

/*
** Jump all the way out.  If there's a pending interrupt, just
** let it be serviced later.  Since we're probably using the
** gdb stub, we've already disrupted the ISR service timing
** anyhow.  We oughtn't mix exception and interrupt processing
** in the same exception call in case the exception stuff
** might interfere with the dispatching & timer ticks.
*/
        j       _ISR_Handler_exit
        NOP

_ISR_Handler_1:

        mfc0    t1,C0_SR
        and     t0,CAUSE_IPMASK
        and     t0,t1

/* external interrupt not enabled, ignore */
/* but if it's not an exception or an interrupt, */
/* then where did it come from??? */

        beq     t0,zero,_ISR_Handler_exit
        NOP


/*
 *  save some or all context on stack
 *  may need to save some special interrupt information for exit
 *
 *  if ( _ISR_Nest_level == 0 )
 *    switch to software interrupt stack
 */


/*
 *  _ISR_Nest_level++;
 */
        lw      t0,ISR_NEST_LEVEL
        NOP
        add     t0,t0,1
        sw      t0,ISR_NEST_LEVEL
/*
 *  _Thread_Dispatch_disable_level++;
 */
        lw      t1,THREAD_DISPATCH_DISABLE_LEVEL
        NOP
        add     t1,t1,1
        sw      t1,THREAD_DISPATCH_DISABLE_LEVEL

/*
 *  Call the CPU model or BSP specific routine to decode the
 *  interrupt source and actually vector to device ISR handlers.
 */

#ifdef INSTRUMENT_ISR_VECTORING
        NOP
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move    a0,sp
        jal     mips_vector_isr_handlers
        NOP

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        NOP
#endif

/*
 *  --_ISR_Nest_level;
 */
        lw      t2,ISR_NEST_LEVEL
        NOP
        add     t2,t2,-1
        sw      t2,ISR_NEST_LEVEL
/*
 *  --_Thread_Dispatch_disable_level;
 */
        lw      t1,THREAD_DISPATCH_DISABLE_LEVEL
        NOP
        add     t1,t1,-1
        sw      t1,THREAD_DISPATCH_DISABLE_LEVEL
/*
 *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
 *    goto the label "exit interrupt (simple case)"
 */
        or      t0,t2,t1
        bne     t0,zero,_ISR_Handler_exit
        NOP


/*
 *  restore stack
 *
 *  if !_Thread_Dispatch_necessary
 *    goto the label "exit interrupt (simple case)"
 */
        lbu     t0,DISPATCH_NEEDED
        NOP
        or      t0,t0,t0
        beq     t0,zero,_ISR_Handler_exit
        NOP


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,THREAD_EXECUTING
        NOP
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering _Thread_Dispatch, which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the _Thread_Dispatch_disable_level interlock
** that prevents recursive entry into _Thread_Dispatch.
*/

        mfc0    t0, C0_SR
#if __mips == 1

        li      t1,SR_IEC
        or      t0, t1

#elif (__mips == 3) || (__mips == 32)

/*
** Clear EXL and set IE so we can get interrupts.
*/
        li      t1, SR_EXL
        not     t1
        and     t0,t1
        or      t0, SR_IE

#endif
        mtc0    t0, C0_SR
        NOP

/* save off our stack frame so the context switcher can get to it */
        la      t0,__exceptionStackFrame
        STREG   sp,(t0)

        jal     _Thread_Dispatch
        NOP

/*
** And make sure it's clear in case we didn't dispatch.  If we did,
** it's already cleared.
*/
        la      t0,__exceptionStackFrame
        STREG   zero,(t0)
        NOP

/*
** Turn interrupts back off while we restore context so
** a badly timed interrupt won't mess things up.
*/
        mfc0    t0, C0_SR

#if __mips == 1

/* ints off, current & prev kernel mode on (kernel mode enabled is bit clear...argh!) */
        li      t1,SR_IEC | SR_KUP | SR_KUC
        not     t1
        and     t0, t1
        mtc0    t0, C0_SR
        NOP

#elif (__mips == 3) || (__mips == 32)

/* make sure EXL and IE are set so ints are disabled & we can update EPC for the return */
        li      t1,SR_IE        /* Clear IE first (recommended) */
        not     t1
        and     t0,t1
        mtc0    t0,C0_SR
        NOP

/* apply the task's SR with EXL set so the eret will return properly */
        or      t0, SR_EXL | SR_IE
        mtc0    t0, C0_SR
        NOP

/* store the new EPC value, which is safe now that EXL is set */
        LDREG   t0, R_EPC*R_SZ(sp)
        NOP
        MTCO    t0, C0_EPC
        NOP

#endif


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,THREAD_EXECUTING
        NOP
        sw      t0,0x8001FFF8
#endif


/*
 *  prepare to get out of interrupt
 *  return from interrupt
 *
 *  LABEL "exit interrupt (simple case):"
 *    prepare to get out of interrupt
 *    return from interrupt
 */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register.  _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changed, so all we need to do is return.
*/
/* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,THREAD_EXECUTING
        NOP
        sw      t0, 0x8001FFFC
#endif

        LDREG   t8, R_MDLO*R_SZ(sp)
        LDREG   t0, R_T0*R_SZ(sp)
        mtlo    t8
        LDREG   t8, R_MDHI*R_SZ(sp)
        LDREG   t1, R_T1*R_SZ(sp)
        mthi    t8
        LDREG   t2, R_T2*R_SZ(sp)
        LDREG   t3, R_T3*R_SZ(sp)
        LDREG   t4, R_T4*R_SZ(sp)
        LDREG   t5, R_T5*R_SZ(sp)
        LDREG   t6, R_T6*R_SZ(sp)
        LDREG   t7, R_T7*R_SZ(sp)
        LDREG   t8, R_T8*R_SZ(sp)
        LDREG   t9, R_T9*R_SZ(sp)
        LDREG   gp, R_GP*R_SZ(sp)
        LDREG   fp, R_FP*R_SZ(sp)
        LDREG   ra, R_RA*R_SZ(sp)
        LDREG   a0, R_A0*R_SZ(sp)
        LDREG   a1, R_A1*R_SZ(sp)
        LDREG   a2, R_A2*R_SZ(sp)
        LDREG   a3, R_A3*R_SZ(sp)
        LDREG   v1, R_V1*R_SZ(sp)
        LDREG   v0, R_V0*R_SZ(sp)

#if __mips == 1
        LDREG   k1, R_EPC*R_SZ(sp)
#endif

        .set noat
        LDREG   AT, R_AT*R_SZ(sp)
        .set at

        ADDIU   sp,sp,EXCP_STACK_SIZE

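/* Under __mips == 1 the rfe below executes in the delay slot of
 * "j k1", popping the KU/IE status stack as control returns to the
 * saved EPC; under the newer ISAs eret clears EXL and returns to
 * C0_EPC directly.
 */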
#if (__mips == 3) || (__mips == 32)
        eret
#elif __mips == 1
        j       k1
        rfe
#endif
        NOP

        .set reorder
ENDFRAME(_ISR_Handler)


FRAME(mips_break,sp,0,ra)
        .set noreorder
        break   0x0     /* this statement must be first in this function, assumed so by mips-stub.c */
        NOP
        j       ra
        NOP
        .set reorder
ENDFRAME(mips_break)

---|