/*
 * This file contains the basic algorithms for all assembly code used
 * in a specific CPU port of RTEMS.  These algorithms must be implemented
 * in assembly language.
 *
 * History:
 *   Baseline: no_cpu
 *   1996: Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *         COPYRIGHT (c) 1996 by Transition Networks Inc.
 *         To anyone who acknowledges that the modifications to this file to
 *         port it to the MIPS64ORION are provided "AS IS" without any
 *         express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission.  Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *   2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *         the baseline of the more general MIPS port.
 *   2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *         rewriting as much as possible in C and added the JMR3904 BSP
 *         so testing could be performed on a simulator.
 *   2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
 *         performance, tweaking this code and the isr vectoring routines
 *         to reduce overhead & latencies.  Added optional
 *         instrumentation as well.
 *   2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
 *         cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
 *         and deferred FP contexts.
 *   2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
 *         by increasing the amount of context saved/restored.
 *
 * COPYRIGHT (c) 1989-2002.
 * On-Line Applications Research Corporation (OAR).
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.OARcorp.com/rtems/license.html.
 *
 * $Id$
 */

#include <asm.h>
#include "iregdef.h"
#include "idtcpu.h"

#define ASSEMBLY_ONLY
#include <rtems/score/cpu.h>

/* Enable debugging shadow writes to misc RAM.  This is a vestigial
 * Mongoose-ism debug tool, but it may be handy in the future so we
 * left it in...
 */

#define INSTRUMENT_ISR_VECTORING
/* #define INSTRUMENT_EXECUTING_THREAD */


/* Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
 * and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP
#define ADD     dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0
#define MTCO    dmtc0
#define ADDU    addu
#define ADDIU   addiu
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif __mips == 1
/* 32 bit register operations */
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif
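
/* As a worked example of the macro scheme above (illustrative only): on a
 * MIPS ISA 1 build the line
 *
 *     STREG t0,C0_SR_OFFSET*R_SZ(a0)
 *
 * (using an offset defined further below) assembles as "sw t0,44(a0)",
 * offset 11 * 4 bytes, while the same source line on a MIPS ISA 3 build
 * assembles as "sd t0,88(a0)", offset 11 * 8 bytes.  The rest of this
 * file is written once in terms of these macros instead of being
 * duplicated per ISA level.
 */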

#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)

#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET       0
#define S1_OFFSET       1
#define S2_OFFSET       2
#define S3_OFFSET       3
#define S4_OFFSET       4
#define S5_OFFSET       5
#define S6_OFFSET       6
#define S7_OFFSET       7
#define SP_OFFSET       8
#define FP_OFFSET       9
#define RA_OFFSET       10
#define C0_SR_OFFSET    11
#define C0_EPC_OFFSET   12
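
/* For orientation only, these offsets assume a Context_Control laid out
 * roughly as sketched below; the field names and register type are
 * illustrative assumptions, and cpu.h remains the authoritative
 * definition.  Each field is one register image of R_SZ bytes, so the
 * byte offset used by the assembly is <name>_OFFSET * R_SZ.
 *
 *     typedef struct {
 *         mips_register_t s0, s1, s2, s3, s4, s5, s6, s7;
 *         mips_register_t sp, fp, ra;
 *         mips_register_t c0_sr;
 *         mips_register_t c0_epc;
 *     } Context_Control;
 */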

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET      0
#define FP1_OFFSET      1
#define FP2_OFFSET      2
#define FP3_OFFSET      3
#define FP4_OFFSET      4
#define FP5_OFFSET      5
#define FP6_OFFSET      6
#define FP7_OFFSET      7
#define FP8_OFFSET      8
#define FP9_OFFSET      9
#define FP10_OFFSET     10
#define FP11_OFFSET     11
#define FP12_OFFSET     12
#define FP13_OFFSET     13
#define FP14_OFFSET     14
#define FP15_OFFSET     15
#define FP16_OFFSET     16
#define FP17_OFFSET     17
#define FP18_OFFSET     18
#define FP19_OFFSET     19
#define FP20_OFFSET     20
#define FP21_OFFSET     21
#define FP22_OFFSET     22
#define FP23_OFFSET     23
#define FP24_OFFSET     24
#define FP25_OFFSET     25
#define FP26_OFFSET     26
#define FP27_OFFSET     27
#define FP28_OFFSET     28
#define FP29_OFFSET     29
#define FP30_OFFSET     30
#define FP31_OFFSET     31
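
/* Correspondingly, a minimal sketch of the Context_Control_fp these FP
 * offsets assume (again illustrative; cpu.h is authoritative): the 32
 * FPU registers stored in order, one F_SZ-byte slot per register, so
 * register fN lives at byte offset FPn_OFFSET * F_SZ.
 *
 *     typedef struct {
 *         mips_fp_register_t fp_regs[32];
 *     } Context_Control_fp;
 */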


ASM_EXTERN(__exceptionStackFrame, SZ_INT)


/*
 * _CPU_Context_save_fp_context
 *
 * This routine is responsible for saving the FP context
 * at *fp_context_ptr.  If the pointer to load the FP context
 * from is changed then the pointer is modified by this routine.
 *
 * Sometimes a macro implementation of this is in cpu.h which dereferences
 * the ** and a similarly named routine in this file is passed something
 * like a (Context_Control_fp *).  The general rule on making this decision
 * is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */
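
/* A hedged sketch of the alternative described above: cpu.h could supply
 * a macro that performs the double-pointer dereference in C and hands a
 * plain (Context_Control_fp *) to a routine in this file, e.g. (names
 * purely illustrative, not the actual cpu.h contents):
 *
 *     #define _CPU_Context_save_fp( _fp_ptr )  _CPU_Context_save_fp_body( *(_fp_ptr) )
 *
 * This port instead takes the (void **) directly and dereferences it in
 * assembly ("ld a1,(a0)" below).
 */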

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noreorder
        .set noat

        /*
        ** Make sure the FPU is on before we save state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        MFC0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if __mips == 3
        li      t2,SR_EXL | SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        MTC0    t0,C0_SR

        ld      a1,(a0)
        move    t0,ra
        jal     _CPU_Context_save_fp_from_exception
        NOP

        /*
        ** Reassert the task's state because we've not saved it yet.
        */
        MTC0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        swc1    $f0,FP0_OFFSET*F_SZ(a1)
        swc1    $f1,FP1_OFFSET*F_SZ(a1)
        swc1    $f2,FP2_OFFSET*F_SZ(a1)
        swc1    $f3,FP3_OFFSET*F_SZ(a1)
        swc1    $f4,FP4_OFFSET*F_SZ(a1)
        swc1    $f5,FP5_OFFSET*F_SZ(a1)
        swc1    $f6,FP6_OFFSET*F_SZ(a1)
        swc1    $f7,FP7_OFFSET*F_SZ(a1)
        swc1    $f8,FP8_OFFSET*F_SZ(a1)
        swc1    $f9,FP9_OFFSET*F_SZ(a1)
        swc1    $f10,FP10_OFFSET*F_SZ(a1)
        swc1    $f11,FP11_OFFSET*F_SZ(a1)
        swc1    $f12,FP12_OFFSET*F_SZ(a1)
        swc1    $f13,FP13_OFFSET*F_SZ(a1)
        swc1    $f14,FP14_OFFSET*F_SZ(a1)
        swc1    $f15,FP15_OFFSET*F_SZ(a1)
        swc1    $f16,FP16_OFFSET*F_SZ(a1)
        swc1    $f17,FP17_OFFSET*F_SZ(a1)
        swc1    $f18,FP18_OFFSET*F_SZ(a1)
        swc1    $f19,FP19_OFFSET*F_SZ(a1)
        swc1    $f20,FP20_OFFSET*F_SZ(a1)
        swc1    $f21,FP21_OFFSET*F_SZ(a1)
        swc1    $f22,FP22_OFFSET*F_SZ(a1)
        swc1    $f23,FP23_OFFSET*F_SZ(a1)
        swc1    $f24,FP24_OFFSET*F_SZ(a1)
        swc1    $f25,FP25_OFFSET*F_SZ(a1)
        swc1    $f26,FP26_OFFSET*F_SZ(a1)
        swc1    $f27,FP27_OFFSET*F_SZ(a1)
        swc1    $f28,FP28_OFFSET*F_SZ(a1)
        swc1    $f29,FP29_OFFSET*F_SZ(a1)
        swc1    $f30,FP30_OFFSET*F_SZ(a1)
        swc1    $f31,FP31_OFFSET*F_SZ(a1)
        j       ra
        NOP
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 * _CPU_Context_restore_fp_context
 *
 * This routine is responsible for restoring the FP context
 * at *fp_context_ptr.  If the pointer to load the FP context
 * from is changed then the pointer is modified by this routine.
 *
 * Sometimes a macro implementation of this is in cpu.h which dereferences
 * the ** and a similarly named routine in this file is passed something
 * like a (Context_Control_fp *).  The general rule on making this decision
 * is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        .set noreorder

        /*
        ** Make sure the FPU is on before we retrieve state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        MFC0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if __mips == 3
        li      t2,SR_EXL | SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        MTC0    t0,C0_SR

        ld      a1,(a0)
        move    t0,ra
        jal     _CPU_Context_restore_fp_from_exception
        NOP

        /*
        ** Reassert the old task's state because we've not restored the
        ** new one yet.
        */
        MTC0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        lwc1    $f0,FP0_OFFSET*F_SZ(a1)
        lwc1    $f1,FP1_OFFSET*F_SZ(a1)
        lwc1    $f2,FP2_OFFSET*F_SZ(a1)
        lwc1    $f3,FP3_OFFSET*F_SZ(a1)
        lwc1    $f4,FP4_OFFSET*F_SZ(a1)
        lwc1    $f5,FP5_OFFSET*F_SZ(a1)
        lwc1    $f6,FP6_OFFSET*F_SZ(a1)
        lwc1    $f7,FP7_OFFSET*F_SZ(a1)
        lwc1    $f8,FP8_OFFSET*F_SZ(a1)
        lwc1    $f9,FP9_OFFSET*F_SZ(a1)
        lwc1    $f10,FP10_OFFSET*F_SZ(a1)
        lwc1    $f11,FP11_OFFSET*F_SZ(a1)
        lwc1    $f12,FP12_OFFSET*F_SZ(a1)
        lwc1    $f13,FP13_OFFSET*F_SZ(a1)
        lwc1    $f14,FP14_OFFSET*F_SZ(a1)
        lwc1    $f15,FP15_OFFSET*F_SZ(a1)
        lwc1    $f16,FP16_OFFSET*F_SZ(a1)
        lwc1    $f17,FP17_OFFSET*F_SZ(a1)
        lwc1    $f18,FP18_OFFSET*F_SZ(a1)
        lwc1    $f19,FP19_OFFSET*F_SZ(a1)
        lwc1    $f20,FP20_OFFSET*F_SZ(a1)
        lwc1    $f21,FP21_OFFSET*F_SZ(a1)
        lwc1    $f22,FP22_OFFSET*F_SZ(a1)
        lwc1    $f23,FP23_OFFSET*F_SZ(a1)
        lwc1    $f24,FP24_OFFSET*F_SZ(a1)
        lwc1    $f25,FP25_OFFSET*F_SZ(a1)
        lwc1    $f26,FP26_OFFSET*F_SZ(a1)
        lwc1    $f27,FP27_OFFSET*F_SZ(a1)
        lwc1    $f28,FP28_OFFSET*F_SZ(a1)
        lwc1    $f29,FP29_OFFSET*F_SZ(a1)
        lwc1    $f30,FP30_OFFSET*F_SZ(a1)
        lwc1    $f31,FP31_OFFSET*F_SZ(a1)
        j       ra
        NOP
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/* _CPU_Context_switch
 *
 * This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control *run,
 *   Context_Control *heir
 * )
 */

FRAME(_CPU_Context_switch,sp,0,ra)
        .set noreorder

        MFC0    t0,C0_SR
#if __mips == 3
        li      t1,SR_EXL | SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        STREG   t0,C0_SR_OFFSET*R_SZ(a0)        /* save the task's SR */
        not     t1
        and     t0,t1           /* mask off interrupts while we context switch */
        MTC0    t0,C0_SR
        NOP

        STREG   ra,RA_OFFSET*R_SZ(a0)           /* save current context */
        STREG   sp,SP_OFFSET*R_SZ(a0)
        STREG   fp,FP_OFFSET*R_SZ(a0)
        STREG   s0,S0_OFFSET*R_SZ(a0)
        STREG   s1,S1_OFFSET*R_SZ(a0)
        STREG   s2,S2_OFFSET*R_SZ(a0)
        STREG   s3,S3_OFFSET*R_SZ(a0)
        STREG   s4,S4_OFFSET*R_SZ(a0)
        STREG   s5,S5_OFFSET*R_SZ(a0)
        STREG   s6,S6_OFFSET*R_SZ(a0)
        STREG   s7,S7_OFFSET*R_SZ(a0)


        /*
        ** This code grabs the userspace EPC if we're dispatching from
        ** an interrupt frame, or fakes an address as the EPC if we're
        ** not.  This is for the gdbstub's benefit so it can know
        ** where each thread is running.
        **
        ** The value is only set when the interrupt handler calls
        ** _Thread_Dispatch, and it is cleared immediately when this
        ** routine picks it up.
        */
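
        /*
        ** Roughly, in C terms (illustrative only; the frame indexing is an
        ** assumption, the assembly below is what actually runs):
        **
        **     frame = __exceptionStackFrame;
        **     __exceptionStackFrame = 0;
        **     epc = frame ? frame[R_EPC] : (address of _Thread_Dispatch);
        **     run->c0_epc = epc;     (stored via C0_EPC_OFFSET below)
        **
        ** i.e. a real userspace EPC when we are dispatching out of an
        ** interrupt frame, or the address of _Thread_Dispatch as a
        ** recognizable stand-in when we are not.
        */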

        la      t0,__exceptionStackFrame        /* see if we're coming in from an exception */
        LDREG   t1, (t0)
        NOP
        beqz    t1,1f

        STREG   zero, (t0)                      /* and clear it */
        NOP
        LDREG   t0,R_EPC*R_SZ(t1)               /* get the userspace EPC from the frame */
        b       2f

1:      la      t0,_Thread_Dispatch             /* if ==0, we're switched out */

2:      STREG   t0,C0_EPC_OFFSET*R_SZ(a0)


_CPU_Context_switch_restore:
        LDREG   ra,RA_OFFSET*R_SZ(a1)           /* restore context */
        LDREG   sp,SP_OFFSET*R_SZ(a1)
        LDREG   fp,FP_OFFSET*R_SZ(a1)
        LDREG   s0,S0_OFFSET*R_SZ(a1)
        LDREG   s1,S1_OFFSET*R_SZ(a1)
        LDREG   s2,S2_OFFSET*R_SZ(a1)
        LDREG   s3,S3_OFFSET*R_SZ(a1)
        LDREG   s4,S4_OFFSET*R_SZ(a1)
        LDREG   s5,S5_OFFSET*R_SZ(a1)
        LDREG   s6,S6_OFFSET*R_SZ(a1)
        LDREG   s7,S7_OFFSET*R_SZ(a1)

        LDREG   t0, C0_SR_OFFSET*R_SZ(a1)

//      NOP
//#if __mips == 3
//      andi    t0,SR_EXL
//      bnez    t0,_CPU_Context_1               /* set exception level from restore context */
//      li      t0,~SR_EXL
//      MFC0    t1,C0_SR
//      NOP
//      and     t1,t0
//      MTC0    t1,C0_SR
//
//#elif __mips == 1
//
//      andi    t0,(SR_INTERRUPT_ENABLE_BITS)   /* we know 0 disabled */
//      beq     t0,$0,_CPU_Context_1            /* set level from restore context */
//      MFC0    t0,C0_SR
//      NOP
//      or      t0,(SR_INTERRUPT_ENABLE_BITS)   /* new_sr = old sr with enabled */
//      MTC0    t0,C0_SR                        /* set with enabled */
//      NOP


/*
** Incorporate the new task's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump thru the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the thread's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a thread
** must itself manipulate the SR register.
**
** Although something of a hack on this processor, we treat the SR register
** int enables as the RTEMS interrupt level.  We use the int level
** value as a bitmask, not as any sort of greater than/less than metric.
** Manipulation of a task's interrupt level directly corresponds to manipulation
** of that task's SR bits, as seen in cpu.c
**
** Note, interrupts are disabled before context is saved, though the thread's
** interrupt enable state is recorded.  The task swapping in will apply its
** specific SR bits, including interrupt enable.  If further task-specific
** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
** cpu.h task initialization code that will be affected.
*/
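
/*
** In C terms, the merge performed below is roughly (illustrative only):
**
**     per_task_bits = SR_CU1 | SR_IMASK | <int enable bits for this ISA>;
**     new_sr        = (current_sr & ~per_task_bits)
**                   | (task_saved_sr & per_task_bits);
**
** i.e. only the FPU enable, interrupt mask and interrupt enable bits come
** from the task's saved SR; everything else keeps its current, global value.
*/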

        li      t2,SR_CU1
        or      t2,SR_IMASK

/* int enable bits */
#if __mips == 3
        or      t2,SR_EXL + SR_IE
#elif __mips == 1
        or      t2,SR_IEC + SR_IEP      /* save current & previous int enable */
#endif
        and     t0,t2                   /* keep only the per-task bits */

        MFC0    t1,C0_SR                /* grab the current SR */
        not     t2
        and     t1,t2                   /* mask off the old task's bits */
        or      t1,t0                   /* or in the new task's bits */
        MTC0    t1,C0_SR                /* and load the new SR */
        NOP

/* _CPU_Context_1: */
        j       ra
        NOP
ENDFRAME(_CPU_Context_switch)


/*
 * _CPU_Context_restore
 *
 * This routine is generally used only to restart self in an
 * efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 * NOTE: May be unnecessary to reload some registers.
 *
 * void _CPU_Context_restore(
 *   Context_Control *new_context
 * );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        .set noreorder
        move    a1,a0
        j       _CPU_Context_switch_restore
        NOP

ENDFRAME(_CPU_Context_restore)

ASM_EXTERN(_ISR_Nest_level, SZ_INT)
ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
ASM_EXTERN(_Thread_Executing,SZ_INT)

.extern _Thread_Dispatch
.extern _ISR_Vector_table


/* void _DBG_Handler()
 *
 * This routine services the debug vector (on MIPS1 at least), which is
 * used only by the hardware debugging features.  This code, while
 * optional, is best located here because it is intrinsically associated
 * with exceptions in general and thus tied pretty closely to
 * _ISR_Handler.
 */


FRAME(_DBG_Handler,sp,0,ra)
        .set noreorder
        la      k0,_ISR_Handler
        j       k0
        NOP
        .set reorder
ENDFRAME(_DBG_Handler)


/* void _ISR_Handler()
 *
 * This routine provides the RTEMS interrupt management.
 *
 * void _ISR_Handler()
 *
 *
 * This discussion ignores a lot of the ugly details in a real
 * implementation such as saving enough registers/state to be
 * able to do something real.  Keep in mind that the goal is
 * to invoke a user's ISR handler which is written in C and
 * uses a certain set of registers.
 *
 * Also note that the exact order is to a large extent flexible.
 * Hardware will dictate a sequence for a certain subset of
 * _ISR_Handler while requirements for setting
 *
 * At entry to "common" _ISR_Handler, the vector number must be
 * available.  On some CPUs the hardware puts either the vector
 * number or the offset into the vector table for this ISR in a
 * known place.  If the hardware does not give us this information,
 * then the assembly portion of RTEMS for this port will contain
 * a set of distinct interrupt entry points which somehow place
 * the vector number in a known place (which is safe if another
 * interrupt nests this one) and branch to _ISR_Handler.
 *
 */
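
/* A rough C-level outline of the handler below (illustrative pseudo-code
 * only, not the actual control flow word for word):
 *
 *     save volatile registers, SR and EPC on the stack frame;
 *     if ( CAUSE says this is an exception, not an interrupt ) {
 *         save the remaining context (s0-s7, CP0 specials, FPU);
 *         mips_vector_exceptions( frame );
 *         restore and return via the (possibly adjusted) EPC;
 *     }
 *     _ISR_Nest_level++;  _Thread_Dispatch_disable_level++;
 *     mips_vector_isr_handlers( frame );
 *     --_ISR_Nest_level;  --_Thread_Dispatch_disable_level;
 *     if ( both levels are zero and a dispatch is needed )
 *         _Thread_Dispatch();
 *     restore volatile registers and return via EPC and rfe;
 */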

FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU   sp,sp,-EXCP_STACK_SIZE

        STREG   ra, R_RA*R_SZ(sp)       /* store ra on the stack */
        STREG   v0, R_V0*R_SZ(sp)
        STREG   v1, R_V1*R_SZ(sp)
        STREG   a0, R_A0*R_SZ(sp)
        STREG   a1, R_A1*R_SZ(sp)
        STREG   a2, R_A2*R_SZ(sp)
        STREG   a3, R_A3*R_SZ(sp)
        STREG   t0, R_T0*R_SZ(sp)
        STREG   t1, R_T1*R_SZ(sp)
        STREG   t2, R_T2*R_SZ(sp)
        STREG   t3, R_T3*R_SZ(sp)
        STREG   t4, R_T4*R_SZ(sp)
        STREG   t5, R_T5*R_SZ(sp)
        STREG   t6, R_T6*R_SZ(sp)
        STREG   t7, R_T7*R_SZ(sp)
        mflo    t0
        STREG   t8, R_T8*R_SZ(sp)
        STREG   t0, R_MDLO*R_SZ(sp)
        STREG   t9, R_T9*R_SZ(sp)
        mfhi    t0
        STREG   gp, R_GP*R_SZ(sp)
        STREG   t0, R_MDHI*R_SZ(sp)
        STREG   fp, R_FP*R_SZ(sp)

        .set noat
        STREG   AT, R_AT*R_SZ(sp)
        .set at

        MFC0    t0,C0_SR
        MFC0    t1,C0_EPC
        STREG   t0,R_SR*R_SZ(sp)
        STREG   t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t2, _Thread_Executing
        NOP
        sw      t2, 0x8001FFF0
#endif

        /* determine if an interrupt generated this exception */

        MFC0    t0,C0_CAUSE
        NOP

        and     t1,t0,CAUSE_EXCMASK
        beq     t1, 0, _ISR_Handler_1

_ISR_Handler_Exception:

/*
        sw      t0,0x8001FF00
        sw      t1,0x8001FF04
*/

        /* If we return from the exception, it is assumed nothing
         * bad is going on and we can continue to run normally.
         * But we want to save the entire CPU context so exception
         * handlers can look at it and change it.
         *
         * NOTE: This is the path the debugger stub will take.
         */

        /* already got t0 = cause in the interrupt test above */
        STREG   t0,R_CAUSE*R_SZ(sp)

        STREG   sp, R_SP*R_SZ(sp)

        STREG   s0,R_S0*R_SZ(sp)        /* save s0 - s7 */
        STREG   s1,R_S1*R_SZ(sp)
        STREG   s2,R_S2*R_SZ(sp)
        STREG   s3,R_S3*R_SZ(sp)
        STREG   s4,R_S4*R_SZ(sp)
        STREG   s5,R_S5*R_SZ(sp)
        STREG   s6,R_S6*R_SZ(sp)
        STREG   s7,R_S7*R_SZ(sp)

        /* CP0 special registers */

        MFC0    t0,C0_TAR
        MFC0    t1,C0_BADVADDR
        STREG   t0,R_TAR*R_SZ(sp)
        STREG   t1,R_BADVADDR*R_SZ(sp)

#if ( CPU_HARDWARE_FP == TRUE )
        MFC0    t0,C0_SR                /* FPU is enabled, save state */
        NOP
        srl     t0,t0,16
        andi    t0,t0,(SR_CU1 >> 16)
        beqz    t0, 1f
        NOP

        la      a1,R_F0*R_SZ(sp)
        jal     _CPU_Context_save_fp_from_exception
        NOP
        MFC1    t0,C1_REVISION
        MFC1    t1,C1_STATUS
        STREG   t0,R_FEIR*R_SZ(sp)
        STREG   t1,R_FCSR*R_SZ(sp)

1:
#endif

        move    a0,sp
        jal     mips_vector_exceptions
        NOP

/*
** Note, if the exception vector returns, rely on it to have
** adjusted EPC so we will return to some correct address.  If
** this is not done, we might get stuck in an infinite loop because
** we'll return to the instruction where the exception occurred and
** it could throw again.
**
** It is expected the only code using the exception processing is
** either the gdb stub or some user code which is either going to
** panic or do something useful.
*/


/* *********************************************************************
* compute the address of the instruction we'll return to *

        LDREG   t1, R_CAUSE*R_SZ(sp)
        LDREG   t0, R_EPC*R_SZ(sp)

* first see if the exception happened in the delay slot *
        li      t3,CAUSE_BD
        AND     t4,t1,t3
        beqz    t4,excnodelay
        NOP

* it did, now see if the branch occurred or not *
        li      t3,CAUSE_BT
        AND     t4,t1,t3
        beqz    t4,excnobranch
        NOP

* branch was taken, we resume at the branch target *
        LDREG   t0, R_TAR*R_SZ(sp)
        j       excreturn
        NOP

excnobranch:
        ADDU    t0,R_SZ

excnodelay:
        ADDU    t0,R_SZ

excreturn:
        STREG   t0, R_EPC*R_SZ(sp)
        NOP
********************************************************************* */


        /* if we're returning into mips_break, move to the next instruction */

        LDREG   t0,R_EPC*R_SZ(sp)
        la      t1,mips_break
        xor     t2,t0,t1
        bnez    t2,3f

        addu    t0,R_SZ
        STREG   t0,R_EPC*R_SZ(sp)
        NOP
3:


#if ( CPU_HARDWARE_FP == TRUE )
        MFC0    t0,C0_SR                /* FPU is enabled, restore state */
        NOP
        srl     t0,t0,16
        andi    t0,t0,(SR_CU1 >> 16)
        beqz    t0, 2f
        NOP

        la      a1,R_F0*R_SZ(sp)
        jal     _CPU_Context_restore_fp_from_exception
        NOP
        LDREG   t0,R_FEIR*R_SZ(sp)
        LDREG   t1,R_FCSR*R_SZ(sp)
        MTC1    t0,C1_REVISION
        MTC1    t1,C1_STATUS
2:
#endif
        LDREG   s0,R_S0*R_SZ(sp)        /* restore s0 - s7 */
        LDREG   s1,R_S1*R_SZ(sp)
        LDREG   s2,R_S2*R_SZ(sp)
        LDREG   s3,R_S3*R_SZ(sp)
        LDREG   s4,R_S4*R_SZ(sp)
        LDREG   s5,R_S5*R_SZ(sp)
        LDREG   s6,R_S6*R_SZ(sp)
        LDREG   s7,R_S7*R_SZ(sp)

        /* do NOT restore the sp as this could mess up the world */
        /* do NOT restore the cause as this could mess up the world */

        j       _ISR_Handler_exit
        NOP

_ISR_Handler_1:

        MFC0    t1,C0_SR
        and     t0,CAUSE_IPMASK
        and     t0,t1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* Then where did it come from??? */

        beq     t0,zero,_ISR_Handler_exit


  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */

  /*
   *  _ISR_Nest_level++;
   */
        LDREG   t0,_ISR_Nest_level
        NOP
        ADD     t0,t0,1
        STREG   t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        LDREG   t1,_Thread_Dispatch_disable_level
        NOP
        ADD     t1,t1,1
        STREG   t1,_Thread_Dispatch_disable_level

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

#ifdef INSTRUMENT_ISR_VECTORING
        NOP
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move    a0,sp
        jal     mips_vector_isr_handlers
        NOP

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        NOP
#endif

  /*
   *  --_ISR_Nest_level;
   */
        LDREG   t2,_ISR_Nest_level
        NOP
        ADD     t2,t2,-1
        STREG   t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        LDREG   t1,_Thread_Dispatch_disable_level
        NOP
        ADD     t1,t1,-1
        STREG   t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or      t0,t2,t1
        bne     t0,zero,_ISR_Handler_exit
        NOP


  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        LDREG   t0,_Context_Switch_necessary
        LDREG   t1,_ISR_Signals_to_thread_executing
        NOP
        or      t0,t0,t1
        beq     t0,zero,_ISR_Handler_exit
        NOP


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering Thread_Dispatch which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the Thread_Dispatch_disable_level interlock
** that prevents recursive entry into Thread_Dispatch.
*/

        MFC0    t0, C0_SR
#if __mips == 3
        li      t1,SR_EXL | SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        or      t0, t1
        MTC0    t0, C0_SR
        NOP

        /* save off our stack frame so the context switcher can get to it */
        la      t0,__exceptionStackFrame
        STREG   sp,(t0)

        jal     _Thread_Dispatch
        NOP

        /* and make sure it's clear in case we didn't dispatch.  if we did, it's
        ** already cleared */
        la      t0,__exceptionStackFrame
        STREG   zero,(t0)
        NOP

/*
** turn interrupts back off while we restore context so
** a badly timed interrupt won't accidentally mess things up
*/
        MFC0    t0, C0_SR
#if __mips == 3
        li      t1,SR_EXL | SR_IE
#elif __mips == 1
        li      t1,SR_IEC | SR_KUC      /* ints off, kernel mode on (kernel mode enabled is bit clear..argh!) */
#endif
        not     t1
        and     t0, t1
        MTC0    t0, C0_SR
        NOP

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF8
#endif


  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */
971 | _ISR_Handler_exit: |
---|
972 | /* |
---|
973 | ** Skip the SR restore because its a global register. _CPU_Context_switch_restore |
---|
974 | ** adjusts it according to each task's configuration. If we didn't dispatch, the |
---|
975 | ** SR value isn't changed, so all we need to do is return. |
---|
976 | ** |
---|
977 | */ |
---|
        /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0, 0x8001FFFC
#endif

        LDREG   t8, R_MDLO*R_SZ(sp)
        LDREG   t0, R_T0*R_SZ(sp)
        mtlo    t8
        LDREG   t8, R_MDHI*R_SZ(sp)
        LDREG   t1, R_T1*R_SZ(sp)
        mthi    t8
        LDREG   t2, R_T2*R_SZ(sp)
        LDREG   t3, R_T3*R_SZ(sp)
        LDREG   t4, R_T4*R_SZ(sp)
        LDREG   t5, R_T5*R_SZ(sp)
        LDREG   t6, R_T6*R_SZ(sp)
        LDREG   t7, R_T7*R_SZ(sp)
        LDREG   t8, R_T8*R_SZ(sp)
        LDREG   t9, R_T9*R_SZ(sp)
        LDREG   gp, R_GP*R_SZ(sp)
        LDREG   fp, R_FP*R_SZ(sp)
        LDREG   ra, R_RA*R_SZ(sp)
        LDREG   a0, R_A0*R_SZ(sp)
        LDREG   a1, R_A1*R_SZ(sp)
        LDREG   a2, R_A2*R_SZ(sp)
        LDREG   a3, R_A3*R_SZ(sp)
        LDREG   v1, R_V1*R_SZ(sp)
        LDREG   v0, R_V0*R_SZ(sp)

        LDREG   k1, R_EPC*R_SZ(sp)

        .set noat
        LDREG   AT, R_AT*R_SZ(sp)
        .set at

        ADDIU   sp,sp,EXCP_STACK_SIZE
        j       k1
        rfe
        NOP

        .set reorder
ENDFRAME(_ISR_Handler)


FRAME(mips_break,sp,0,ra)
        .set noreorder
        break   0x0     /* this statement must be first in this function, assumed so by mips-stub.c */
        NOP
        j       ra
        NOP
        .set reorder
ENDFRAME(mips_break)