1 | /* |
---|
2 | * TODO: |
---|
3 | * Context_switch needs to only save callee save registers |
---|
4 | * I think this means can skip: r1, r2, r19-29, r31 |
---|
5 | * Ref: p 3-2 of Procedure Calling Conventions Manual |
---|
6 | * This should be #ifndef DEBUG so that debugger has |
---|
7 | * accurate visibility into all registers |
---|
8 | * |
---|
9 | * This file contains the assembly code for the HPPA implementation |
---|
10 | * of RTEMS. |
---|
11 | * |
---|
12 | * COPYRIGHT (c) 1994,95 by Division Incorporated |
---|
13 | * |
---|
14 | * The license and distribution terms for this file may be |
---|
15 | * found in the file LICENSE in this distribution or at |
---|
16 | * http://www.rtems.com/license/LICENSE. |
---|
17 | * |
---|
18 | * $Id$ |
---|
19 | */ |
---|
20 | |
---|
21 | #include <rtems/score/hppa.h> |
---|
22 | #include <rtems/score/cpu_asm.h> |
---|
23 | #include <rtems/score/cpu.h> |
---|
24 | #include <rtems/score/offsets.h> |
---|
25 | |
---|
/*
 * Section-name abstraction.
 *
 * The dead "#if 0" branch keeps the original HP SOM assembler
 * .SPACE/.SUBSPA spellings for reference; the live "#else" branch maps
 * the same four names onto conventional ELF/GAS section directives.
 */
#if 0
#define TEXT_SEGMENT \
	.SPACE $TEXT$   !\
	.SUBSPA $CODE$
#define RO_SEGMENT \
	.SPACE $TEXT$   !\
	.SUBSPA $lit$
#define DATA_SEGMENT \
	.SPACE $PRIVATE$        !\
	.SUBSPA $data$
#define BSS_SEGMENT \
	.SPACE $PRIVATE$        !\
	.SUBSPA $bss$
#else
#define TEXT_SEGMENT	.text
#define RO_SEGMENT	.rodata
#define DATA_SEGMENT	.data
#define BSS_SEGMENT	.bss
#endif

/*
 * Disabled SOM space/subspace declarations, kept only as documentation
 * of the original HP assembler setup.
 */
#if 0
	.SPACE $PRIVATE$
	.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
	.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
	.SPACE $TEXT$
	.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
	.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
	.SPACE $TEXT$
	.SUBSPA $CODE$

#endif
	/* Everything below lives in the code section */
	TEXT_SEGMENT
---|
60 | |
---|
61 | /* |
---|
62 | * Special register usage for context switch and interrupts |
---|
63 | * Stay away from %cr28 which is used for TLB misses on 72000 |
---|
64 | */ |
---|
65 | |
---|
/*
 * Control-register aliases used as scratch save slots during the first
 * instructions of interrupt handling (before any memory is writable).
 * %cr28 is deliberately avoided (TLB-miss handling on the 72000 -- see
 * the note above).
 */
isr_arg0	.reg	%cr24	/* holds interrupted arg0 (%r26)               */
isr_r9		.reg	%cr25	/* holds interrupted %r9; vector stub saves it */
isr_r8		.reg	%cr26	/* vector stub may clobber %r8 via this        */
---|
69 | |
---|
70 | /* |
---|
71 | * Interrupt stack frame looks like this |
---|
72 | * |
---|
73 | * offset item |
---|
74 | * ----------------------------------------------------------------- |
---|
75 | * INTEGER_CONTEXT_OFFSET Context_Control |
---|
76 | * FP_CONTEXT_OFFSET Context_Control_fp |
---|
77 | * |
---|
78 | * It is padded out to a multiple of 64 |
---|
79 | */ |
---|
80 | |
---|
81 | |
---|
82 | /*PAGE^L |
---|
83 | * void _Generic_ISR_Handler() |
---|
84 | * |
---|
85 | * This routine provides the RTEMS interrupt management. |
---|
86 | * |
---|
87 | * We jump here from the interrupt vector. |
---|
88 | * The HPPA hardware has done some stuff for us: |
---|
89 | * PSW saved in IPSW |
---|
90 | * PSW set to 0 |
---|
91 | * PSW[E] set to default (0) |
---|
92 | * PSW[M] set to 1 iff this is HPMC |
---|
93 | * |
---|
94 | * IIA queue is frozen (since PSW[Q] is now 0) |
---|
95 | * privilege level promoted to 0 |
---|
96 | * IIR, ISR, IOR potentially updated if PSW[Q] was 1 at trap |
---|
97 | * registers GR 1,8,9,16,17,24,25 copied to shadow regs |
---|
98 | * SHR 0 1 2 3 4 5 6 |
---|
99 | * |
---|
100 | * Our vector stub (in the BSP) MUST have done the following: |
---|
101 | * |
---|
102 | * a) Saved the original %r9 into %isr_r9 (%cr25) |
---|
103 | * b) Placed the vector number in %r9 |
---|
 *     c) Was allowed to also destroy %isr_r8 (%cr26),
---|
105 | * but the stub was NOT allowed to destroy any other registers. |
---|
106 | * |
---|
107 | * The typical stub sequence (in the BSP) should look like this: |
---|
108 | * |
---|
109 | * a) mtctl %r9,isr_r9 ; (save r9 in cr25) |
---|
110 | * b) ldi vector,%r9 ; (load constant vector number in r9) |
---|
111 | * c) mtctl %r8,isr_r8 ; (save r8 in cr26) |
---|
112 | * d) ldil L%MY_BSP_first_level_interrupt_handler,%r8 |
---|
113 | * e) ldo R%MY_BSP_first_level_interrupt_handler(%r8),%r8 |
---|
114 | * ; (point to BSP raw handler table) |
---|
115 | * f) ldwx,s %r9(%r8),%r8 ; (load value from raw handler table) |
---|
116 | * g) bv 0(%r8) ; (call raw handler: _Generic_ISR_Handler) |
---|
117 | * h) mfctl isr_r8,%r8 ; (restore r8 from cr26 in delay slot) |
---|
118 | * |
---|
119 | * Optionally, steps (c) thru (h) _could_ be replaced with a single |
---|
120 | * bl,n _Generic_ISR_Handler,%r0 |
---|
121 | * |
---|
122 | * |
---|
123 | */ |
---|
	.EXPORT _Generic_ISR_Handler,ENTRY,PRIV_LEV=0
_Generic_ISR_Handler:
	.PROC
	.CALLINFO FRAME=0,NO_CALLS
	.ENTRY

	/*
	 * Free up arg0 as a scratch register by parking it in %cr24.
	 * On entry (per the header comment): %r9 = vector number,
	 * original %r9 is in isr_r9 (%cr25).
	 */
	mtctl	arg0, isr_arg0

	/*
	 * Save the interruption state (IPSW, IIR, IOR, both PC-queue
	 * entries and SAR) into the interrupt frame being built ABOVE
	 * the interrupted task's sp.  sp itself is not advanced until
	 * later (ldo CPU_INTERRUPT_FRAME_SIZE(sp), sp below), so these
	 * offsets must all be positive.
	 */
	mfctl	ipsw, arg0
	stw	arg0, IPSW_OFFSET(sp)

	mfctl	iir, arg0
	stw	arg0, IIR_OFFSET(sp)

	mfctl	ior, arg0
	stw	arg0, IOR_OFFSET(sp)

	mfctl	pcoq, arg0
	stw	arg0, PCOQFRONT_OFFSET(sp)

	/*
	 * Reading pcoq pops the front entry; writing it pushes onto the
	 * back.  Push a zero so the next read yields the (old) back entry.
	 */
	mtctl	%r0, pcoq
	mfctl	pcoq, arg0
	stw	arg0, PCOQBACK_OFFSET(sp)

	mfctl	%sar, arg0
	stw	arg0, SAR_OFFSET(sp)

	/*
	 * Build an interrupt frame to hold the contexts we will need.
	 * We have already saved the interrupt items on the stack
	 *
	 * At this point the following registers are damaged wrt the interrupt
	 *  reg	current value		saved value
	 * ------------------------------------------------
	 *  arg0	scratch			isr_arg0  (cr24)
	 *  r9		vector number		isr_r9    (cr25)
	 *
	 * Point to beginning of integer context and
	 * save the integer context
	 */
	stw	%r1,R1_OFFSET(sp)
	stw	%r2,R2_OFFSET(sp)
	stw	%r3,R3_OFFSET(sp)
	stw	%r4,R4_OFFSET(sp)
	stw	%r5,R5_OFFSET(sp)
	stw	%r6,R6_OFFSET(sp)
	stw	%r7,R7_OFFSET(sp)
	stw	%r8,R8_OFFSET(sp)
	/*
	 * skip r9 -- it currently holds the vector number; the real r9
	 * is recovered from isr_r9 below
	 */
	stw	%r10,R10_OFFSET(sp)
	stw	%r11,R11_OFFSET(sp)
	stw	%r12,R12_OFFSET(sp)
	stw	%r13,R13_OFFSET(sp)
	stw	%r14,R14_OFFSET(sp)
	stw	%r15,R15_OFFSET(sp)
	stw	%r16,R16_OFFSET(sp)
	stw	%r17,R17_OFFSET(sp)
	stw	%r18,R18_OFFSET(sp)
	stw	%r19,R19_OFFSET(sp)
	stw	%r20,R20_OFFSET(sp)
	stw	%r21,R21_OFFSET(sp)
	stw	%r22,R22_OFFSET(sp)
	stw	%r23,R23_OFFSET(sp)
	stw	%r24,R24_OFFSET(sp)
	stw	%r25,R25_OFFSET(sp)
	/*
	 * skip arg0 (r26) -- the real value is in isr_arg0, stored below
	 */
	stw	%r27,R27_OFFSET(sp)
	stw	%r28,R28_OFFSET(sp)
	stw	%r29,R29_OFFSET(sp)
	stw	%r30,R30_OFFSET(sp)
	stw	%r31,R31_OFFSET(sp)

	/* Now most registers are available since they have been saved
	 *
	 * The following items are currently wrong in the integer context
	 *  reg	current value		saved value
	 * ------------------------------------------------
	 *  arg0	scratch			isr_arg0  (cr24)
	 *  r9		vector number		isr_r9    (cr25)
	 *
	 * Fix them
	 */

	mfctl	isr_arg0,%r3
	stw	%r3,ARG0_OFFSET(sp)

	mfctl	isr_r9,%r3
	stw	%r3,R9_OFFSET(sp)

	/*
	 * At this point we are done with isr_arg0, and isr_r9 control registers
	 *
	 * Prepare to re-enter virtual mode
	 * We need Q in case the interrupt handler enables interrupts
	 */

	ldil	L%CPU_PSW_DEFAULT, arg0
	ldo	R%CPU_PSW_DEFAULT(arg0), arg0
	mtctl	arg0, ipsw

	/*
	 * Now jump to "rest_of_isr_handler" with the rfi.
	 * Queue the target address (and target+4) into pcoq so rfi
	 * resumes there with the IPSW loaded above.
	 * We are assuming the space queues are all correct already
	 */

	ldil	L%rest_of_isr_handler, arg0
	ldo	R%rest_of_isr_handler(arg0), arg0
	mtctl	arg0, pcoq
	ldo	4(arg0), arg0
	mtctl	arg0, pcoq

	rfi
	nop

	/*
	 * At this point we are back in virtual mode and all our
	 * normal addressing is once again ok.
	 *
	 * It is now ok to take an exception or trap
	 */

rest_of_isr_handler:

	/*
	 * Point to beginning of float context and
	 * save the floating point context -- doing whatever patches are necessary
	 * (delay slot sets up arg0 = &frame->fp_context before the call)
	 */

	.call ARGW0=GR
	bl	_CPU_Save_float_context,%r2
	ldo	FP_CONTEXT_OFFSET(sp),arg0

	/*
	 * save the ptr to interrupt frame as an argument for the interrupt handler
	 */

	copy	sp, arg1

	/*
	 * Advance the frame to point beyond all interrupt contexts (integer & float)
	 * this also includes the pad to align to 64byte stack boundary
	 */
	ldo	CPU_INTERRUPT_FRAME_SIZE(sp), sp

	/*
	 * Register roles from here to the user-handler call:
	 *   r3 -- &_ISR_Nest_level
	 *   r5 -- value _ISR_Nest_level
	 *   r4 -- &_Thread_Dispatch_disable_level
	 *   r6 -- value _Thread_Dispatch_disable_level
	 *   r9 -- vector number
	 */

	.import _ISR_Nest_level,data
	ldil	L%_ISR_Nest_level,%r3
	ldo	R%_ISR_Nest_level(%r3),%r3
	ldw	0(%r3),%r5

	.import _Thread_Dispatch_disable_level,data
	ldil	L%_Thread_Dispatch_disable_level,%r4
	ldo	R%_Thread_Dispatch_disable_level(%r4),%r4
	ldw	0(%r4),%r6

	/*
	 * increment interrupt nest level counter.  If outermost interrupt
	 * switch the stack and squirrel away the previous sp.
	 */
	addi	1,%r5,%r5
	stw	%r5, 0(%r3)

	/*
	 * compute and save new stack (with frame)
	 * just in case we are nested -- simpler this way.
	 * NOTE: the delay slot (ldo 128(sp),%r7) executes on BOTH paths,
	 * so %r7 is preset for the nested case before the branch resolves.
	 */
	comibf,=	1,%r5,stack_done
	ldo	128(sp),%r7

	/*
	 * Outermost interrupt:
	 * Switch to interrupt stack allocated by the interrupt manager (intr.c)
	 */
	.import _CPU_Interrupt_stack_low,data
	ldil	L%_CPU_Interrupt_stack_low,%r7
	ldw	R%_CPU_Interrupt_stack_low(%r7),%r7
	ldo	128(%r7),%r7

stack_done:
	/*
	 * save our current stack pointer where the "old sp" is supposed to be
	 * (one word below the new frame)
	 */
	stw	sp, -4(%r7)
	/*
	 * and switch stacks (or advance old stack in nested case)
	 */
	copy	%r7, sp

	/*
	 * increment the dispatch disable level counter.
	 */
	addi	1,%r6,%r6
	stw	%r6, 0(%r4)

	/*
	 * load address of user handler
	 * Note:  No error checking is done, it is assumed that the
	 *        vector table contains a valid address or a stub
	 *        spurious handler.
	 */
	.import _ISR_Vector_table,data
	ldil	L%_ISR_Vector_table,%r8
	ldo	R%_ISR_Vector_table(%r8),%r8
	ldw	0(%r8),%r8
	ldwx,s	%r9(%r8),%r8

	/*
	 * invoke user interrupt handler
	 * Interrupts are currently disabled, as per RTEMS convention
	 * The handler has the option of re-enabling interrupts
	 * NOTE:  can not use 'bl' since it uses "pc-relative" addressing
	 *    and we are using a hard coded address from a table
	 *    So... we fudge r2 ourselves (ala dynacall): blr %r0,rp makes
	 *    rp point just past the bv, i.e. at post_user_interrupt_handler.
	 *  arg0 = vector number, arg1 = ptr to rtems_interrupt_frame
	 */
	copy	%r9, %r26
	.call ARGW0=GR, ARGW1=GR
	blr	%r0, rp
	bv,n	0(%r8)

post_user_interrupt_handler:

	/*
	 * Back from user handler(s)
	 * Disable external interrupts (since the interrupt handler could
	 * have turned them on) and return to the interrupted task stack
	 * (assuming (_ISR_Nest_level == 0)) by reloading the "old sp"
	 * saved at -4(sp) above.
	 */

	rsm	HPPA_PSW_I + HPPA_PSW_R, %r0
	ldw	-4(sp), sp

	/*
	 * Register roles for the unwind phase:
	 *   r3 -- (most of) &_ISR_Nest_level
	 *   r5 -- value _ISR_Nest_level
	 *   r4 -- (most of) &_Thread_Dispatch_disable_level
	 *   r6 -- value _Thread_Dispatch_disable_level
	 *   r7 -- (most of) &_ISR_Signals_to_thread_executing
	 *   r8 -- value _ISR_Signals_to_thread_executing
	 * ("most of" = ldil half; the R% half is folded into each ldw/stw)
	 */

	.import _ISR_Nest_level,data
	ldil	L%_ISR_Nest_level,%r3
	ldw	R%_ISR_Nest_level(%r3),%r5

	.import _Thread_Dispatch_disable_level,data
	ldil	L%_Thread_Dispatch_disable_level,%r4
	ldw	R%_Thread_Dispatch_disable_level(%r4),%r6

	.import _ISR_Signals_to_thread_executing,data
	ldil	L%_ISR_Signals_to_thread_executing,%r7

	/*
	 * decrement isr nest level
	 */
	addi	-1, %r5, %r5
	stw	%r5, R%_ISR_Nest_level(%r3)

	/*
	 * decrement dispatch disable level counter and, if not 0, go on
	 * (delay slot stores the decremented value on both paths)
	 */
	addi	-1,%r6,%r6
	comibf,=	0,%r6,isr_restore
	stw	%r6, R%_Thread_Dispatch_disable_level(%r4)

	/*
	 * check whether or not a context switch is necessary
	 */
	.import _Context_Switch_necessary,data
	ldil	L%_Context_Switch_necessary,%r8
	ldw	R%_Context_Switch_necessary(%r8),%r8
	comibf,=,n	0,%r8,ISR_dispatch

	/*
	 * check whether or not a context switch is necessary because an ISR
	 * sent signals to the interrupted task
	 */
	ldw	R%_ISR_Signals_to_thread_executing(%r7),%r8
	comibt,=,n	0,%r8,isr_restore


	/*
	 * OK, something happened while in ISR and we need to switch to a task
	 * other than the one which was interrupted or the
	 *    ISR_Signals_to_thread_executing case
	 * We also turn on interrupts, since the interrupted task had them
	 *   on (obviously :-)  and Thread_Dispatch is happy to leave ints on.
	 */

ISR_dispatch:
	stw	%r0, R%_ISR_Signals_to_thread_executing(%r7)

	ssm	HPPA_PSW_I, %r0

	/*
	 * Call _Thread_Dispatch; the delay slot allocates a minimal
	 * 128-byte calling frame which is popped right after return.
	 */
	.import _Thread_Dispatch,code
	.call
	bl	_Thread_Dispatch,%r2
	ldo	128(sp),sp

	ldo	-128(sp),sp

isr_restore:

	/*
	 * enable interrupts during most of restore
	 */
	ssm	HPPA_PSW_I, %r0

	/*
	 * Get a pointer to beginning of our stack frame
	 * (undo the CPU_INTERRUPT_FRAME_SIZE advance done on entry)
	 */
	ldo	-CPU_INTERRUPT_FRAME_SIZE(sp), %arg1

	/*
	 * restore float (delay slot computes &frame->fp_context)
	 */
	.call ARGW0=GR
	bl	_CPU_Restore_float_context,%r2
	ldo	FP_CONTEXT_OFFSET(%arg1), arg0

	copy	%arg1, %arg0

	/*
	 * ********** FALL THRU **********
	 */

	/*
	 * Jump here from bottom of Context_Switch
	 * Also called directly by _CPU_Context_Restart_self via _Thread_Restart_self
	 * restore interrupt state
	 *
	 * On entry: arg0 = pointer to the saved integer/interrupt context.
	 */

	.EXPORT _CPU_Context_restore
_CPU_Context_restore:

	/*
	 * restore integer state
	 */
	ldw	R1_OFFSET(arg0),%r1
	ldw	R2_OFFSET(arg0),%r2
	ldw	R3_OFFSET(arg0),%r3
	ldw	R4_OFFSET(arg0),%r4
	ldw	R5_OFFSET(arg0),%r5
	ldw	R6_OFFSET(arg0),%r6
	ldw	R7_OFFSET(arg0),%r7
	ldw	R8_OFFSET(arg0),%r8
	ldw	R9_OFFSET(arg0),%r9
	ldw	R10_OFFSET(arg0),%r10
	ldw	R11_OFFSET(arg0),%r11
	ldw	R12_OFFSET(arg0),%r12
	ldw	R13_OFFSET(arg0),%r13
	ldw	R14_OFFSET(arg0),%r14
	ldw	R15_OFFSET(arg0),%r15
	ldw	R16_OFFSET(arg0),%r16
	ldw	R17_OFFSET(arg0),%r17
	ldw	R18_OFFSET(arg0),%r18
	ldw	R19_OFFSET(arg0),%r19
	ldw	R20_OFFSET(arg0),%r20
	ldw	R21_OFFSET(arg0),%r21
	ldw	R22_OFFSET(arg0),%r22
	ldw	R23_OFFSET(arg0),%r23
	ldw	R24_OFFSET(arg0),%r24
	/*
	 * skipping r25; used as scratch register below
	 * skipping r26 (arg0) until we are done with it
	 */
	ldw	R27_OFFSET(arg0),%r27
	ldw	R28_OFFSET(arg0),%r28
	ldw	R29_OFFSET(arg0),%r29
	/*
	 * skipping r30 (sp) until we turn off interrupts
	 */
	ldw	R31_OFFSET(arg0),%r31

	/*
	 * Turn off Q & R & I so we can write r30 and interrupt control registers
	 */
	rsm	HPPA_PSW_Q + HPPA_PSW_R + HPPA_PSW_I, %r0

	/*
	 * now safe to restore r30
	 */
	ldw	R30_OFFSET(arg0),%r30

	/* IPSW takes effect at the rfi below */
	ldw	IPSW_OFFSET(arg0), %r25
	mtctl	%r25, ipsw

	ldw	SAR_OFFSET(arg0), %r25
	mtctl	%r25, sar

	/* refill the PC queue: front entry first, then back */
	ldw	PCOQFRONT_OFFSET(arg0), %r25
	mtctl	%r25, pcoq

	ldw	PCOQBACK_OFFSET(arg0), %r25
	mtctl	%r25, pcoq

	/*
	 * Load r25 with interrupts off (it was the scratch register above)
	 */
	ldw	R25_OFFSET(arg0),%r25
	/*
	 * Must load r26 (arg0) last -- it is the context pointer itself
	 */
	ldw	R26_OFFSET(arg0),%r26

isr_exit:
	rfi
	.EXIT
	.PROCEND
---|
546 | |
---|
547 | /* |
---|
548 | * This section is used to context switch floating point registers. |
---|
549 | * Ref: 6-35 of Architecture 1.1 |
---|
550 | * |
---|
551 | * NOTE: since integer multiply uses the floating point unit, |
---|
552 | * we have to save/restore fp on every trap. We cannot |
---|
553 | * just try to keep track of fp usage. |
---|
554 | */ |
---|
555 | |
---|
	.align 32
	.EXPORT _CPU_Save_float_context,ENTRY,PRIV_LEV=0
_CPU_Save_float_context:
	.PROC
	.CALLINFO FRAME=0,NO_CALLS
	.ENTRY
	/*
	 * Store all 32 double-precision FP registers at arg0.
	 * ",ma" = modify-after: each store post-increments arg0 by 8,
	 * so the last register (%fr31) is stored with a plain fstds at
	 * offset 0 of the already-advanced pointer.
	 */
	fstds,ma	%fr0,8(%arg0)
	fstds,ma	%fr1,8(%arg0)
	fstds,ma	%fr2,8(%arg0)
	fstds,ma	%fr3,8(%arg0)
	fstds,ma	%fr4,8(%arg0)
	fstds,ma	%fr5,8(%arg0)
	fstds,ma	%fr6,8(%arg0)
	fstds,ma	%fr7,8(%arg0)
	fstds,ma	%fr8,8(%arg0)
	fstds,ma	%fr9,8(%arg0)
	fstds,ma	%fr10,8(%arg0)
	fstds,ma	%fr11,8(%arg0)
	fstds,ma	%fr12,8(%arg0)
	fstds,ma	%fr13,8(%arg0)
	fstds,ma	%fr14,8(%arg0)
	fstds,ma	%fr15,8(%arg0)
	fstds,ma	%fr16,8(%arg0)
	fstds,ma	%fr17,8(%arg0)
	fstds,ma	%fr18,8(%arg0)
	fstds,ma	%fr19,8(%arg0)
	fstds,ma	%fr20,8(%arg0)
	fstds,ma	%fr21,8(%arg0)
	fstds,ma	%fr22,8(%arg0)
	fstds,ma	%fr23,8(%arg0)
	fstds,ma	%fr24,8(%arg0)
	fstds,ma	%fr25,8(%arg0)
	fstds,ma	%fr26,8(%arg0)
	fstds,ma	%fr27,8(%arg0)
	fstds,ma	%fr28,8(%arg0)
	fstds,ma	%fr29,8(%arg0)
	fstds,ma	%fr30,8(%arg0)
	fstds	%fr31,0(%arg0)
	/* return; delay slot rewinds arg0 past the 31 post-increments */
	bv	0(%r2)
	addi	-(31*8), %arg0, %arg0	; restore arg0 just for fun
	.EXIT
	.PROCEND
---|
598 | |
---|
	.align 32
	.EXPORT _CPU_Restore_float_context,ENTRY,PRIV_LEV=0
_CPU_Restore_float_context:
	.PROC
	.CALLINFO FRAME=0,NO_CALLS
	.ENTRY
	/*
	 * Mirror image of _CPU_Save_float_context: start at the last
	 * double and walk down.  ",mb" = modify-before: each load
	 * pre-decrements arg0 by 8.  The final fldds,mb in the bv delay
	 * slot both restores %fr0 and leaves arg0 back at its original
	 * value.
	 */
	addi	(31*8), %arg0, %arg0	; point at last double
	fldds	0(%arg0),%fr31
	fldds,mb	-8(%arg0),%fr30
	fldds,mb	-8(%arg0),%fr29
	fldds,mb	-8(%arg0),%fr28
	fldds,mb	-8(%arg0),%fr27
	fldds,mb	-8(%arg0),%fr26
	fldds,mb	-8(%arg0),%fr25
	fldds,mb	-8(%arg0),%fr24
	fldds,mb	-8(%arg0),%fr23
	fldds,mb	-8(%arg0),%fr22
	fldds,mb	-8(%arg0),%fr21
	fldds,mb	-8(%arg0),%fr20
	fldds,mb	-8(%arg0),%fr19
	fldds,mb	-8(%arg0),%fr18
	fldds,mb	-8(%arg0),%fr17
	fldds,mb	-8(%arg0),%fr16
	fldds,mb	-8(%arg0),%fr15
	fldds,mb	-8(%arg0),%fr14
	fldds,mb	-8(%arg0),%fr13
	fldds,mb	-8(%arg0),%fr12
	fldds,mb	-8(%arg0),%fr11
	fldds,mb	-8(%arg0),%fr10
	fldds,mb	-8(%arg0),%fr9
	fldds,mb	-8(%arg0),%fr8
	fldds,mb	-8(%arg0),%fr7
	fldds,mb	-8(%arg0),%fr6
	fldds,mb	-8(%arg0),%fr5
	fldds,mb	-8(%arg0),%fr4
	fldds,mb	-8(%arg0),%fr3
	fldds,mb	-8(%arg0),%fr2
	fldds,mb	-8(%arg0),%fr1
	bv	0(%r2)
	fldds,mb	-8(%arg0),%fr0
	.EXIT
	.PROCEND
---|
641 | |
---|
642 | /* |
---|
643 | * These 2 small routines are unused right now. |
---|
644 | * Normally we just go thru _CPU_Save_float_context (and Restore) |
---|
645 | * |
---|
646 | * Here we just deref the ptr and jump up, letting _CPU_Save_float_context |
---|
647 | * do the return for us. |
---|
648 | */ |
---|
649 | |
---|
	/*
	 * void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr )
	 *
	 * Dereference the pointer (in the delay slot) and tail-branch into
	 * _CPU_Save_float_context, which performs the actual stores and
	 * the return.  "bl ...,%r0" discards the new link, so the caller's
	 * %r2 is still the return address used by the target's bv.
	 */
	.EXPORT _CPU_Context_save_fp,ENTRY,PRIV_LEV=0
_CPU_Context_save_fp:
	.PROC
	.CALLINFO FRAME=0,NO_CALLS
	.ENTRY
	bl	_CPU_Save_float_context, %r0
	ldw	0(%arg0), %arg0
	.EXIT
	.PROCEND
---|
659 | |
---|
	/*
	 * void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr )
	 *
	 * Same tail-branch trick as _CPU_Context_save_fp: deref the pointer
	 * in the delay slot and let _CPU_Restore_float_context return to
	 * our caller via the untouched %r2.
	 */
	.EXPORT _CPU_Context_restore_fp,ENTRY,PRIV_LEV=0
_CPU_Context_restore_fp:
	.PROC
	.CALLINFO FRAME=0,NO_CALLS
	.ENTRY
	bl	_CPU_Restore_float_context, %r0
	ldw	0(%arg0), %arg0
	.EXIT
	.PROCEND
---|
669 | |
---|
670 | |
---|
671 | /* |
---|
672 | * void _CPU_Context_switch( run_context, heir_context ) |
---|
673 | * |
---|
674 | * This routine performs a normal non-FP context switch. |
---|
675 | */ |
---|
676 | |
---|
	.align 32
	.EXPORT _CPU_Context_switch,ENTRY,PRIV_LEV=0,ARGW0=GR,ARGW1=GR
_CPU_Context_switch:
	.PROC
	.CALLINFO FRAME=64
	.ENTRY

	/*
	 * Save the integer context of the running task into *arg0.
	 * NOTE (see TODO at top of file): this currently saves ALL
	 * integer registers, not just the callee-saved set.
	 */
	stw	%r1,R1_OFFSET(arg0)
	stw	%r2,R2_OFFSET(arg0)
	stw	%r3,R3_OFFSET(arg0)
	stw	%r4,R4_OFFSET(arg0)
	stw	%r5,R5_OFFSET(arg0)
	stw	%r6,R6_OFFSET(arg0)
	stw	%r7,R7_OFFSET(arg0)
	stw	%r8,R8_OFFSET(arg0)
	stw	%r9,R9_OFFSET(arg0)
	stw	%r10,R10_OFFSET(arg0)
	stw	%r11,R11_OFFSET(arg0)
	stw	%r12,R12_OFFSET(arg0)
	stw	%r13,R13_OFFSET(arg0)
	stw	%r14,R14_OFFSET(arg0)
	stw	%r15,R15_OFFSET(arg0)
	stw	%r16,R16_OFFSET(arg0)
	stw	%r17,R17_OFFSET(arg0)
	stw	%r18,R18_OFFSET(arg0)
	stw	%r19,R19_OFFSET(arg0)
	stw	%r20,R20_OFFSET(arg0)
	stw	%r21,R21_OFFSET(arg0)
	stw	%r22,R22_OFFSET(arg0)
	stw	%r23,R23_OFFSET(arg0)
	stw	%r24,R24_OFFSET(arg0)
	stw	%r25,R25_OFFSET(arg0)
	stw	%r26,R26_OFFSET(arg0)
	stw	%r27,R27_OFFSET(arg0)
	stw	%r28,R28_OFFSET(arg0)
	stw	%r29,R29_OFFSET(arg0)
	stw	%r30,R30_OFFSET(arg0)
	stw	%r31,R31_OFFSET(arg0)

	/*
	 * fill in interrupt context section:
	 * resuming this task via _CPU_Context_restore's rfi must continue
	 * at our return address (%r2), so queue rp and rp+4 as the saved
	 * PC-queue front/back entries.
	 */
	stw	%r2, PCOQFRONT_OFFSET(%arg0)
	ldo	4(%r2), %r2
	stw	%r2, PCOQBACK_OFFSET(%arg0)

	/*
	 * Generate a suitable IPSW by using the system default psw
	 *  with the current low bits added in.
	 * ("ssm 0,reg" reads the live PSW without changing it; dep merges
	 *  its low 8 bits into the default.)
	 */

	ldil	L%CPU_PSW_DEFAULT, %r2
	ldo	R%CPU_PSW_DEFAULT(%r2), %r2
	ssm	0, %arg2
	dep	%arg2, 31, 8, %r2
	stw	%r2, IPSW_OFFSET(%arg0)

	/*
	 * at this point, the running task context is completely saved
	 * Now jump to the bottom of the interrupt handler to load the
	 * heirs context (delay slot moves heir context ptr into arg0)
	 */

	b	_CPU_Context_restore
	copy	%arg1, %arg0

	.EXIT
	.PROCEND
---|
748 | |
---|
749 | |
---|
750 | /* |
---|
751 | * Find first bit |
---|
752 | * NOTE: |
---|
753 | * This is used (and written) only for the ready chain code and |
---|
754 | * priority bit maps. |
---|
755 | * Any other use constitutes fraud. |
---|
756 | * Returns first bit from the least significant side. |
---|
757 | * Eg: if input is 0x8001 |
---|
758 | * output will indicate the '1' bit and return 0. |
---|
759 | * This is counter to HPPA bit numbering which calls this |
---|
760 | * bit 31. This way simplifies the macros _CPU_Priority_Mask |
---|
761 | * and _CPU_Priority_Bits_index. |
---|
762 | * |
---|
763 | * NOTE: |
---|
764 | * We just use 16 bit version |
---|
765 | * does not handle zero case |
---|
766 | * |
---|
767 | * Based on the UTAH Mach libc version of ffs. |
---|
768 | */ |
---|
769 | |
---|
	.align 32
	.EXPORT hppa_rtems_ffs,ENTRY,PRIV_LEV=0,ARGW0=GR
hppa_rtems_ffs:
	.PROC
	.CALLINFO FRAME=0,NO_CALLS
	.ENTRY

	/*
	 * Binary-search ffs (find-first-set, counting from the LSB = 0).
	 * Each extru,= tests the low N bits and nullifies the following
	 * instruction when they are zero; addi,tr always skips the shd
	 * that follows it.  Result (bit index) is returned in %ret0.
	 */
#ifdef RETURN_ERROR_ON_ZERO
	comb,=	%arg0,%r0,ffsdone	; If arg0 is 0
	ldi	-1,%ret0		; return -1
#endif

#if BITFIELD_SIZE == 32
	ldi	31,%ret0		; Set return to high bit
	extru,=	%arg0,31,16,%r0		; If low 16 bits are non-zero
	addi,tr	-16,%ret0,%ret0		; subtract 16 from bitpos
	shd	%r0,%arg0,16,%arg0	; else shift right 16 bits
#else
	ldi	15,%ret0		; Set return to high bit
#endif
	extru,=	%arg0,31,8,%r0		; If low 8 bits are non-zero
	addi,tr	-8,%ret0,%ret0		; subtract 8 from bitpos
	shd	%r0,%arg0,8,%arg0	; else shift right 8 bits
	extru,=	%arg0,31,4,%r0		; If low 4 bits are non-zero
	addi,tr	-4,%ret0,%ret0		; subtract 4 from bitpos
	shd	%r0,%arg0,4,%arg0	; else shift right 4 bits
	extru,=	%arg0,31,2,%r0		; If low 2 bits are non-zero
	addi,tr	-2,%ret0,%ret0		; subtract 2 from bitpos
	shd	%r0,%arg0,2,%arg0	; else shift right 2 bits
	extru,=	%arg0,31,1,%r0		; If low bit is non-zero
	addi	-1,%ret0,%ret0		; subtract 1 from bitpos
ffsdone:
	bv,n	0(%r2)
	nop
	.EXIT
	.PROCEND
---|