source: rtems/c/src/exec/score/cpu/hppa1.1/cpu_asm.s @ 254b4450

Last change on this file since 254b4450 was 254b4450, checked in by Joel Sherrill <joel.sherrill@…>, on 04/01/97 at 23:07:52

This set of changes is the bulk of what was required to convert to
GNU autoconf. This is the first large step in allowing an RTEMS
user to perform a one-tree build (per the crossgcc FAQ) that includes
RTEMS in the build process. With this change RTEMS is configured and
built in the same style as the GNU tools, yet retains the basic
structure of its traditional Makefiles (ala Tony Bennett).
Jiri Gaisler (jgais@…) deserves (and received)
a big thank you for doing this.

There are still issues to be resolved, but as of this commit, all targets
which can be built on a linux host have been built using a modified version
of the source Jiri submitted. This source was merged and most targets
were built in the tree before this commit.

The issues which remain to be resolved are primarily
related to host OS dependencies, script issues, the use of gawk
for hack_specs, and the dependence on gcc snapshots. These will
be resolved.

  • Property mode set to 100644
File size: 22.3 KB
/*
 * TODO:
 *       Context_switch needs to only save callee save registers
 *       I think this means can skip:    r1, r2, r19-29, r31
 *       Ref:     p 3-2 of Procedure Calling Conventions Manual
 *       This should be #ifndef DEBUG so that debugger has
 *       accurate visibility into all registers
 *
 *  This file contains the assembly code for the HPPA implementation
 *  of RTEMS.
 *
 *  COPYRIGHT (c) 1994,95 by Division Incorporated
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *      for any purpose is hereby granted without fee, provided that
 *      the above copyright notice and this notice appears in all
 *      copies, and that the name of Division Incorporated not be
 *      used in advertising or publicity pertaining to distribution
 *      of the software without specific, written prior permission.
 *      Division Incorporated makes no representations about the
 *      suitability of this software for any purpose.
 *
 *  $Id$
 */

#include <rtems/score/hppa.h>
#include <rtems/score/cpu_asm.h>
#include <rtems/score/cpu.h>
#include <rtems/score/offsets.h>

        .SPACE $PRIVATE$
        .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
        .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
        .SPACE $TEXT$
        .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
        .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
        .SPACE $TEXT$
        .SUBSPA $CODE$

/*
 * Special register usage for context switch and interrupts
 * Stay away from %cr28 which is used for TLB misses on 72000
 */

isr_arg0           .reg    %cr24
isr_r9             .reg    %cr25
isr_r8             .reg    %cr26

/*
 * Interrupt stack frame looks like this
 *
 *  offset                                   item
 * -----------------------------------------------------------------
 *   INTEGER_CONTEXT_OFFSET             Context_Control
 *   FP_CONTEXT_OFFSET                  Context_Control_fp
 *
 * It is padded out to a multiple of 64
 */

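/*
 * Illustrative sketch (not from the original source): given the layout
 * above and the 64-byte padding rule, the frame size used later as
 * CPU_INTERRUPT_FRAME_SIZE is presumably computed along the lines of
 *
 *     frame_size = ( sizeof(Context_Control) +
 *                    sizeof(Context_Control_fp) + 63 ) & ~63;
 *
 * The actual value comes from <rtems/score/offsets.h>.
 */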

/*PAGE
 *  void _Generic_ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *   We jump here from the interrupt vector.
 *   The HPPA hardware has done some stuff for us:
 *       PSW saved in IPSW
 *       PSW set to 0
 *       PSW[E] set to default (0)
 *       PSW[M] set to 1 iff this is HPMC
 *
 *       IIA queue is frozen (since PSW[Q] is now 0)
 *       privilege level promoted to 0
 *       IIR, ISR, IOR potentially updated if PSW[Q] was 1 at trap
 *       registers GR  1,8,9,16,17,24,25 copied to shadow regs
 *                 SHR 0 1 2  3  4  5  6
 *
 *   Our vector stub (in the BSP) MUST have done the following:
 *
 *   a) Saved the original %r9 into %isr_r9 (%cr25)
 *   b) Placed the vector number in %r9
 *   c) Was allowed to also destroy %isr_r8 (%cr26),
 *      but the stub was NOT allowed to destroy any other registers.
 *
 *   The typical stub sequence (in the BSP) should look like this:
 *
 *   a)     mtctl   %r9,isr_r9     ; (save r9 in cr25)
 *   b)     ldi     vector,%r9     ; (load constant vector number in r9)
 *   c)     mtctl   %r8,isr_r8     ; (save r8 in cr26)
 *   d)     ldil    L%MY_BSP_first_level_interrupt_handler,%r8
 *   e)     ldo     R%MY_BSP_first_level_interrupt_handler(%r8),%r8
 *                                 ; (point to BSP raw handler table)
 *   f)     ldwx,s  %r9(%r8),%r8   ; (load value from raw handler table)
 *   g)     bv      0(%r8)         ; (call raw handler: _Generic_ISR_Handler)
 *   h)     mfctl   isr_r8,%r8     ; (restore r8 from cr26 in delay slot)
 *
 *   Optionally, steps (c) thru (h) _could_ be replaced with a single
 *          bl,n    _Generic_ISR_Handler,%r0
 *
 *
 */
        .EXPORT _Generic_ISR_Handler,ENTRY,PRIV_LEV=0
_Generic_ISR_Handler:
        .PROC
        .CALLINFO FRAME=0,NO_CALLS
        .ENTRY

        mtctl     arg0, isr_arg0

/*
 * save interrupt state
 */
        mfctl     ipsw, arg0
        stw       arg0, IPSW_OFFSET(sp)

        mfctl     iir, arg0
        stw       arg0, IIR_OFFSET(sp)

        mfctl     ior, arg0
        stw       arg0, IOR_OFFSET(sp)

        mfctl     pcoq, arg0
        stw       arg0, PCOQFRONT_OFFSET(sp)

        mtctl     %r0, pcoq
        mfctl     pcoq, arg0
        stw       arg0, PCOQBACK_OFFSET(sp)

        mfctl     %sar, arg0
        stw       arg0, SAR_OFFSET(sp)

/*
 * Build an interrupt frame to hold the contexts we will need.
 * We have already saved the interrupt items on the stack
 *
 * At this point the following registers are damaged wrt the interrupt
 *  reg    current value        saved value
 * ------------------------------------------------
 *  arg0   scratch               isr_arg0  (cr24)
 *  r9     vector number         isr_r9    (cr25)
 *
 * Point to beginning of integer context and
 * save the integer context
 */
        stw         %r1,R1_OFFSET(sp)
        stw         %r2,R2_OFFSET(sp)
        stw         %r3,R3_OFFSET(sp)
        stw         %r4,R4_OFFSET(sp)
        stw         %r5,R5_OFFSET(sp)
        stw         %r6,R6_OFFSET(sp)
        stw         %r7,R7_OFFSET(sp)
        stw         %r8,R8_OFFSET(sp)
/*
 * skip r9
 */
        stw         %r10,R10_OFFSET(sp)
        stw         %r11,R11_OFFSET(sp)
        stw         %r12,R12_OFFSET(sp)
        stw         %r13,R13_OFFSET(sp)
        stw         %r14,R14_OFFSET(sp)
        stw         %r15,R15_OFFSET(sp)
        stw         %r16,R16_OFFSET(sp)
        stw         %r17,R17_OFFSET(sp)
        stw         %r18,R18_OFFSET(sp)
        stw         %r19,R19_OFFSET(sp)
        stw         %r20,R20_OFFSET(sp)
        stw         %r21,R21_OFFSET(sp)
        stw         %r22,R22_OFFSET(sp)
        stw         %r23,R23_OFFSET(sp)
        stw         %r24,R24_OFFSET(sp)
        stw         %r25,R25_OFFSET(sp)
/*
 * skip arg0
 */
        stw         %r27,R27_OFFSET(sp)
        stw         %r28,R28_OFFSET(sp)
        stw         %r29,R29_OFFSET(sp)
        stw         %r30,R30_OFFSET(sp)
        stw         %r31,R31_OFFSET(sp)

/* Now most registers are available since they have been saved
 *
 * The following items are currently wrong in the integer context
 *  reg    current value        saved value
 * ------------------------------------------------
 *  arg0   scratch               isr_arg0  (cr24)
 *  r9     vector number         isr_r9    (cr25)
 *
 * Fix them
 */

         mfctl      isr_arg0,%r3
         stw        %r3,ARG0_OFFSET(sp)

         mfctl      isr_r9,%r3
         stw        %r3,R9_OFFSET(sp)

/*
 * At this point we are done with isr_arg0, and isr_r9 control registers
 *
 * Prepare to re-enter virtual mode
 * We need Q in case the interrupt handler enables interrupts
 */

        ldil      L%CPU_PSW_DEFAULT, arg0
        ldo       R%CPU_PSW_DEFAULT(arg0), arg0
        mtctl     arg0, ipsw

/*
 * Now jump to "rest_of_isr_handler" with the rfi
 * We are assuming the space queues are all correct already
 */

        ldil      L%rest_of_isr_handler, arg0
        ldo       R%rest_of_isr_handler(arg0), arg0
        mtctl     arg0, pcoq
        ldo       4(arg0), arg0
        mtctl     arg0, pcoq

        rfi
        nop

/*
 * At this point we are back in virtual mode and all our
 *  normal addressing is once again ok.
 *
 *  It is now ok to take an exception or trap
 */

rest_of_isr_handler:

/*
 * Point to beginning of float context and
 * save the floating point context -- doing whatever patches are necessary
 */

        .call ARGW0=GR
        bl          _CPU_Save_float_context,%r2
        ldo         FP_CONTEXT_OFFSET(sp),arg0

/*
 * save the ptr to interrupt frame as an argument for the interrupt handler
 */

        copy        sp, arg1

/*
 * Advance the frame to point beyond all interrupt contexts (integer & float).
 * This also includes the pad to align to a 64-byte stack boundary.
 */
        ldo         CPU_INTERRUPT_FRAME_SIZE(sp), sp

/*
 *    r3  -- &_ISR_Nest_level
 *    r5  -- value _ISR_Nest_level
 *    r4  -- &_Thread_Dispatch_disable_level
 *    r6  -- value _Thread_Dispatch_disable_level
 *    r9  -- vector number
 */

        .import   _ISR_Nest_level,data
        ldil      L%_ISR_Nest_level,%r3
        ldo       R%_ISR_Nest_level(%r3),%r3
        ldw       0(%r3),%r5

        .import   _Thread_Dispatch_disable_level,data
        ldil      L%_Thread_Dispatch_disable_level,%r4
        ldo       R%_Thread_Dispatch_disable_level(%r4),%r4
        ldw       0(%r4),%r6

/*
 * increment interrupt nest level counter.  If outermost interrupt
 * switch the stack and squirrel away the previous sp.
 */
        addi      1,%r5,%r5
        stw       %r5, 0(%r3)

/*
 * compute and save new stack (with frame)
 * just in case we are nested -- simpler this way
 */
        comibf,=  1,%r5,stack_done
        ldo       128(sp),%r7

/*
 * Switch to interrupt stack allocated by the interrupt manager (intr.c)
 */
        .import   _CPU_Interrupt_stack_low,data
        ldil      L%_CPU_Interrupt_stack_low,%r7
        ldw       R%_CPU_Interrupt_stack_low(%r7),%r7
        ldo       128(%r7),%r7

stack_done:
/*
 * save our current stack pointer where the "old sp" is supposed to be
 */
        stw       sp, -4(%r7)
/*
 * and switch stacks (or advance old stack in nested case)
 */
        copy      %r7, sp
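
/*
 * Illustrative C sketch of the stack handling above (added for clarity,
 * not part of the original source; new_sp is a made-up name, 128 is the
 * frame advance used by the code):
 *
 *     _ISR_Nest_level++;
 *     if ( _ISR_Nest_level == 1 )
 *         new_sp = _CPU_Interrupt_stack_low + 128;   -- outermost: dedicated stack
 *     else
 *         new_sp = sp + 128;                         -- nested: stay on current stack
 *     *(new_sp - 4) = sp;                            -- squirrel away the previous sp
 *     sp = new_sp;
 */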

/*
 * increment the dispatch disable level counter.
 */
        addi      1,%r6,%r6
        stw       %r6, 0(%r4)

/*
 * load address of user handler
 * Note:  No error checking is done, it is assumed that the
 *        vector table contains a valid address or a stub
 *        spurious handler.
 */
        .import   _ISR_Vector_table,data
        ldil      L%_ISR_Vector_table,%r8
        ldo       R%_ISR_Vector_table(%r8),%r8
        ldwx,s    %r9(%r8),%r8

/*
 * invoke user interrupt handler
 * Interrupts are currently disabled, as per RTEMS convention
 * The handler has the option of re-enabling interrupts
 * NOTE:  can not use 'bl' since it uses "pc-relative" addressing
 *    and we are using a hard coded address from a table
 *  So... we fudge r2 ourselves (ala dynacall)
 *  arg0 = vector number, arg1 = ptr to rtems_interrupt_frame
 */
        copy      %r9, %r26
        .call  ARGW0=GR, ARGW1=GR
        blr       %r0, rp
        bv,n      0(%r8)

post_user_interrupt_handler:

/*
 * Back from user handler(s)
 * Disable external interrupts (since the interrupt handler could
 * have turned them on) and return to the interrupted task stack
 * (assuming _ISR_Nest_level == 0)
 */

        rsm        HPPA_PSW_I + HPPA_PSW_R, %r0
        ldw        -4(sp), sp

/*
 *    r3  -- (most of) &_ISR_Nest_level
 *    r5  -- value _ISR_Nest_level
 *    r4  -- (most of) &_Thread_Dispatch_disable_level
 *    r6  -- value _Thread_Dispatch_disable_level
 *    r7  -- (most of) &_ISR_Signals_to_thread_executing
 *    r8  -- value _ISR_Signals_to_thread_executing
 */

        .import   _ISR_Nest_level,data
        ldil      L%_ISR_Nest_level,%r3
        ldw       R%_ISR_Nest_level(%r3),%r5

        .import   _Thread_Dispatch_disable_level,data
        ldil      L%_Thread_Dispatch_disable_level,%r4
        ldw       R%_Thread_Dispatch_disable_level(%r4),%r6

        .import    _ISR_Signals_to_thread_executing,data
        ldil       L%_ISR_Signals_to_thread_executing,%r7

/*
 * decrement isr nest level
 */
        addi      -1, %r5, %r5
        stw       %r5, R%_ISR_Nest_level(%r3)

/*
 * decrement dispatch disable level counter and, if not 0, go on
 */
        addi       -1,%r6,%r6
        comibf,=   0,%r6,isr_restore
        stw        %r6, R%_Thread_Dispatch_disable_level(%r4)

/*
 * check whether or not a context switch is necessary
 */
        .import    _Context_Switch_necessary,data
        ldil       L%_Context_Switch_necessary,%r8
        ldw        R%_Context_Switch_necessary(%r8),%r8
        comibf,=,n 0,%r8,ISR_dispatch

/*
 * check whether or not a context switch is necessary because an ISR
 *    sent signals to the interrupted task
 */
        ldw        R%_ISR_Signals_to_thread_executing(%r7),%r8
        comibt,=,n 0,%r8,isr_restore


/*
 * OK, something happened while in ISR and we need to switch to a task
 * other than the one which was interrupted or the
 *    ISR_Signals_to_thread_executing case
 * We also turn on interrupts, since the interrupted task had them
 *   on (obviously :-) and Thread_Dispatch is happy to leave ints on.
 */
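
/*
 * Illustrative C sketch of the decision logic above (added for clarity,
 * not part of the original source; it mirrors the flags tested by the
 * assembly):
 *
 *     _ISR_Nest_level--;
 *     if ( --_Thread_Dispatch_disable_level == 0 ) {
 *         if ( _Context_Switch_necessary ||
 *              _ISR_Signals_to_thread_executing ) {
 *             _ISR_Signals_to_thread_executing = 0;
 *             -- interrupts are re-enabled (ssm HPPA_PSW_I), then:
 *             _Thread_Dispatch();
 *         }
 *     }
 */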

ISR_dispatch:
        stw        %r0, R%_ISR_Signals_to_thread_executing(%r7)

        ssm        HPPA_PSW_I, %r0

        .import    _Thread_Dispatch,code
        .call
        bl         _Thread_Dispatch,%r2
        ldo        128(sp),sp

        ldo        -128(sp),sp

isr_restore:

/*
 * enable interrupts during most of restore
 */
        ssm        HPPA_PSW_I, %r0

/*
 * Get a pointer to beginning of our stack frame
 */
        ldo        -CPU_INTERRUPT_FRAME_SIZE(sp), %arg1

/*
 * restore float
 */
        .call ARGW0=GR
        bl         _CPU_Restore_float_context,%r2
        ldo        FP_CONTEXT_OFFSET(%arg1), arg0

        copy       %arg1, %arg0

/*
 *   ********** FALL THRU **********
 */

/*
 * Jump here from bottom of Context_Switch
 * Also called directly by _CPU_Context_Restart_self via _Thread_Restart_self
 * restore interrupt state
 */

        .EXPORT _CPU_Context_restore
_CPU_Context_restore:

/*
 * restore integer state
 */
        ldw         R1_OFFSET(arg0),%r1
        ldw         R2_OFFSET(arg0),%r2
        ldw         R3_OFFSET(arg0),%r3
        ldw         R4_OFFSET(arg0),%r4
        ldw         R5_OFFSET(arg0),%r5
        ldw         R6_OFFSET(arg0),%r6
        ldw         R7_OFFSET(arg0),%r7
        ldw         R8_OFFSET(arg0),%r8
        ldw         R9_OFFSET(arg0),%r9
        ldw         R10_OFFSET(arg0),%r10
        ldw         R11_OFFSET(arg0),%r11
        ldw         R12_OFFSET(arg0),%r12
        ldw         R13_OFFSET(arg0),%r13
        ldw         R14_OFFSET(arg0),%r14
        ldw         R15_OFFSET(arg0),%r15
        ldw         R16_OFFSET(arg0),%r16
        ldw         R17_OFFSET(arg0),%r17
        ldw         R18_OFFSET(arg0),%r18
        ldw         R19_OFFSET(arg0),%r19
        ldw         R20_OFFSET(arg0),%r20
        ldw         R21_OFFSET(arg0),%r21
        ldw         R22_OFFSET(arg0),%r22
        ldw         R23_OFFSET(arg0),%r23
        ldw         R24_OFFSET(arg0),%r24
/*
 * skipping r25; used as scratch register below
 * skipping r26 (arg0) until we are done with it
 */
        ldw         R27_OFFSET(arg0),%r27
        ldw         R28_OFFSET(arg0),%r28
        ldw         R29_OFFSET(arg0),%r29
/*
 * skipping r30 (sp) until we turn off interrupts
 */
        ldw         R31_OFFSET(arg0),%r31

/*
 * Turn off Q & R & I so we can write r30 and interrupt control registers
 */
        rsm        HPPA_PSW_Q + HPPA_PSW_R + HPPA_PSW_I, %r0

/*
 * now safe to restore r30
 */
        ldw         R30_OFFSET(arg0),%r30

        ldw        IPSW_OFFSET(arg0), %r25
        mtctl      %r25, ipsw

        ldw        SAR_OFFSET(arg0), %r25
        mtctl      %r25, sar

        ldw        PCOQFRONT_OFFSET(arg0), %r25
        mtctl      %r25, pcoq

        ldw        PCOQBACK_OFFSET(arg0), %r25
        mtctl      %r25, pcoq

/*
 * Load r25 with interrupts off
 */
        ldw         R25_OFFSET(arg0),%r25
/*
 * Must load r26 (arg0) last
 */
        ldw         R26_OFFSET(arg0),%r26

isr_exit:
        rfi
        .EXIT
        .PROCEND

/*
 *  This section is used to context switch floating point registers.
 *  Ref:  6-35 of Architecture 1.1
 *
 *  NOTE:    since integer multiply uses the floating point unit,
 *           we have to save/restore fp on every trap.  We cannot
 *           just try to keep track of fp usage.
 */

        .align 32
        .EXPORT _CPU_Save_float_context,ENTRY,PRIV_LEV=0
_CPU_Save_float_context:
        .PROC
        .CALLINFO FRAME=0,NO_CALLS
        .ENTRY
        fstds,ma    %fr0,8(%arg0)
        fstds,ma    %fr1,8(%arg0)
        fstds,ma    %fr2,8(%arg0)
        fstds,ma    %fr3,8(%arg0)
        fstds,ma    %fr4,8(%arg0)
        fstds,ma    %fr5,8(%arg0)
        fstds,ma    %fr6,8(%arg0)
        fstds,ma    %fr7,8(%arg0)
        fstds,ma    %fr8,8(%arg0)
        fstds,ma    %fr9,8(%arg0)
        fstds,ma    %fr10,8(%arg0)
        fstds,ma    %fr11,8(%arg0)
        fstds,ma    %fr12,8(%arg0)
        fstds,ma    %fr13,8(%arg0)
        fstds,ma    %fr14,8(%arg0)
        fstds,ma    %fr15,8(%arg0)
        fstds,ma    %fr16,8(%arg0)
        fstds,ma    %fr17,8(%arg0)
        fstds,ma    %fr18,8(%arg0)
        fstds,ma    %fr19,8(%arg0)
        fstds,ma    %fr20,8(%arg0)
        fstds,ma    %fr21,8(%arg0)
        fstds,ma    %fr22,8(%arg0)
        fstds,ma    %fr23,8(%arg0)
        fstds,ma    %fr24,8(%arg0)
        fstds,ma    %fr25,8(%arg0)
        fstds,ma    %fr26,8(%arg0)
        fstds,ma    %fr27,8(%arg0)
        fstds,ma    %fr28,8(%arg0)
        fstds,ma    %fr29,8(%arg0)
        fstds,ma    %fr30,8(%arg0)
        fstds       %fr31,0(%arg0)
        bv          0(%r2)
        addi        -(31*8), %arg0, %arg0        ; restore arg0 just for fun
        .EXIT
        .PROCEND

        .align 32
        .EXPORT _CPU_Restore_float_context,ENTRY,PRIV_LEV=0
_CPU_Restore_float_context:
        .PROC
        .CALLINFO FRAME=0,NO_CALLS
        .ENTRY
        addi        (31*8), %arg0, %arg0         ; point at last double
        fldds       0(%arg0),%fr31
        fldds,mb    -8(%arg0),%fr30
        fldds,mb    -8(%arg0),%fr29
        fldds,mb    -8(%arg0),%fr28
        fldds,mb    -8(%arg0),%fr27
        fldds,mb    -8(%arg0),%fr26
        fldds,mb    -8(%arg0),%fr25
        fldds,mb    -8(%arg0),%fr24
        fldds,mb    -8(%arg0),%fr23
        fldds,mb    -8(%arg0),%fr22
        fldds,mb    -8(%arg0),%fr21
        fldds,mb    -8(%arg0),%fr20
        fldds,mb    -8(%arg0),%fr19
        fldds,mb    -8(%arg0),%fr18
        fldds,mb    -8(%arg0),%fr17
        fldds,mb    -8(%arg0),%fr16
        fldds,mb    -8(%arg0),%fr15
        fldds,mb    -8(%arg0),%fr14
        fldds,mb    -8(%arg0),%fr13
        fldds,mb    -8(%arg0),%fr12
        fldds,mb    -8(%arg0),%fr11
        fldds,mb    -8(%arg0),%fr10
        fldds,mb    -8(%arg0),%fr9
        fldds,mb    -8(%arg0),%fr8
        fldds,mb    -8(%arg0),%fr7
        fldds,mb    -8(%arg0),%fr6
        fldds,mb    -8(%arg0),%fr5
        fldds,mb    -8(%arg0),%fr4
        fldds,mb    -8(%arg0),%fr3
        fldds,mb    -8(%arg0),%fr2
        fldds,mb    -8(%arg0),%fr1
        bv          0(%r2)
        fldds,mb    -8(%arg0),%fr0
        .EXIT
        .PROCEND

/*
 * These 2 small routines are unused right now.
 * Normally we just go thru _CPU_Save_float_context (and Restore)
 *
 * Here we just deref the ptr and jump up, letting _CPU_Save_float_context
 *  do the return for us.
 */

        .EXPORT _CPU_Context_save_fp,ENTRY,PRIV_LEV=0
_CPU_Context_save_fp:
        .PROC
        .CALLINFO FRAME=0,NO_CALLS
        .ENTRY
        bl          _CPU_Save_float_context, %r0
        ldw         0(%arg0), %arg0
        .EXIT
        .PROCEND

        .EXPORT _CPU_Context_restore_fp,ENTRY,PRIV_LEV=0
_CPU_Context_restore_fp:
        .PROC
        .CALLINFO FRAME=0,NO_CALLS
        .ENTRY
        bl          _CPU_Restore_float_context, %r0
        ldw         0(%arg0), %arg0
        .EXIT
        .PROCEND


/*
 *  void _CPU_Context_switch( run_context, heir_context )
 *
 *  This routine performs a normal non-FP context switch.
 */
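
/*
 * For reference, a sketch of how this routine is typically invoked by the
 * generic scheduler code (an assumption based on the RTEMS score, not
 * taken from this file):
 *
 *     _CPU_Context_switch( &executing->Registers, &heir->Registers );
 *
 * run_context (arg0) is saved into; heir_context (arg1) is loaded from.
 */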

        .align 32
        .EXPORT _CPU_Context_switch,ENTRY,PRIV_LEV=0,ARGW0=GR,ARGW1=GR
_CPU_Context_switch:
        .PROC
        .CALLINFO FRAME=64
        .ENTRY

/*
 * Save the integer context
 */
        stw         %r1,R1_OFFSET(arg0)
        stw         %r2,R2_OFFSET(arg0)
        stw         %r3,R3_OFFSET(arg0)
        stw         %r4,R4_OFFSET(arg0)
        stw         %r5,R5_OFFSET(arg0)
        stw         %r6,R6_OFFSET(arg0)
        stw         %r7,R7_OFFSET(arg0)
        stw         %r8,R8_OFFSET(arg0)
        stw         %r9,R9_OFFSET(arg0)
        stw         %r10,R10_OFFSET(arg0)
        stw         %r11,R11_OFFSET(arg0)
        stw         %r12,R12_OFFSET(arg0)
        stw         %r13,R13_OFFSET(arg0)
        stw         %r14,R14_OFFSET(arg0)
        stw         %r15,R15_OFFSET(arg0)
        stw         %r16,R16_OFFSET(arg0)
        stw         %r17,R17_OFFSET(arg0)
        stw         %r18,R18_OFFSET(arg0)
        stw         %r19,R19_OFFSET(arg0)
        stw         %r20,R20_OFFSET(arg0)
        stw         %r21,R21_OFFSET(arg0)
        stw         %r22,R22_OFFSET(arg0)
        stw         %r23,R23_OFFSET(arg0)
        stw         %r24,R24_OFFSET(arg0)
        stw         %r25,R25_OFFSET(arg0)
        stw         %r26,R26_OFFSET(arg0)
        stw         %r27,R27_OFFSET(arg0)
        stw         %r28,R28_OFFSET(arg0)
        stw         %r29,R29_OFFSET(arg0)
        stw         %r30,R30_OFFSET(arg0)
        stw         %r31,R31_OFFSET(arg0)

/*
 * fill in interrupt context section
 */
        stw         %r2, PCOQFRONT_OFFSET(%arg0)
        ldo         4(%r2), %r2
        stw         %r2, PCOQBACK_OFFSET(%arg0)

/*
 * Generate a suitable IPSW by using the system default psw
 *  with the current low bits added in.
 */

        ldil        L%CPU_PSW_DEFAULT, %r2
        ldo         R%CPU_PSW_DEFAULT(%r2), %r2
        ssm         0, %arg2
        dep         %arg2, 31, 8, %r2
        stw         %r2, IPSW_OFFSET(%arg0)
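
/*
 * Illustrative sketch (added for clarity): the ssm/dep pair above builds
 *
 *     saved_ipsw = ( CPU_PSW_DEFAULT & ~0xff ) | ( current_psw & 0xff );
 *
 * i.e. the system default PSW with its low 8 bits replaced by the low
 * 8 bits of the PSW that is live at the time of the switch.
 */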

/*
 * At this point, the running task's context is completely saved.
 * Now jump to the bottom of the interrupt handler to load the
 * heir's context.
 */

        b           _CPU_Context_restore
        copy        %arg1, %arg0

        .EXIT
        .PROCEND


/*
 * Find first bit
 * NOTE:
 *   This is used (and written) only for the ready chain code and
 *   priority bit maps.
 *   Any other use constitutes fraud.
 *   Returns first bit from the least significant side.
 *   Eg:  if input is 0x8001
 *        output will indicate the '1' bit and return 0.
 *   This is counter to HPPA bit numbering which calls this
 *   bit 31.  This way simplifies the macros _CPU_Priority_Mask
 *   and _CPU_Priority_Bits_index.
 *
 *   NOTE:
 *       We just use 16 bit version
 *       does not handle zero case
 *
 *  Based on the UTAH Mach libc version of ffs.
 */
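
/*
 * Illustrative C sketch of the 16-bit binary search below (added for
 * clarity, not part of the original source; like the assembly it assumes
 * the argument is non-zero):
 *
 *     int ffs16( unsigned int value )
 *     {
 *         int bit = 15;
 *         if ( value & 0x00ff ) bit -= 8; else value >>= 8;
 *         if ( value & 0x000f ) bit -= 4; else value >>= 4;
 *         if ( value & 0x0003 ) bit -= 2; else value >>= 2;
 *         if ( value & 0x0001 ) bit -= 1;
 *         return bit;
 *     }
 */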

        .align 32
        .EXPORT hppa_rtems_ffs,ENTRY,PRIV_LEV=0,ARGW0=GR
hppa_rtems_ffs:
        .PROC
        .CALLINFO FRAME=0,NO_CALLS
        .ENTRY

#ifdef RETURN_ERROR_ON_ZERO
        comb,=  %arg0,%r0,ffsdone       ; If arg0 is 0
        ldi     -1,%ret0                ;   return -1
#endif

#if BITFIELD_SIZE == 32
        ldi     31,%ret0                ; Set return to high bit
        extru,= %arg0,31,16,%r0         ; If low 16 bits are non-zero
        addi,tr -16,%ret0,%ret0         ;   subtract 16 from bitpos
        shd     %r0,%arg0,16,%arg0      ; else shift right 16 bits
#else
        ldi     15,%ret0                ; Set return to high bit
#endif
        extru,= %arg0,31,8,%r0          ; If low 8 bits are non-zero
        addi,tr -8,%ret0,%ret0          ;   subtract 8 from bitpos
        shd     %r0,%arg0,8,%arg0       ; else shift right 8 bits
        extru,= %arg0,31,4,%r0          ; If low 4 bits are non-zero
        addi,tr -4,%ret0,%ret0          ;   subtract 4 from bitpos
        shd     %r0,%arg0,4,%arg0       ; else shift right 4 bits
        extru,= %arg0,31,2,%r0          ; If low 2 bits are non-zero
        addi,tr -2,%ret0,%ret0          ;   subtract 2 from bitpos
        shd     %r0,%arg0,2,%arg0       ; else shift right 2 bits
        extru,= %arg0,31,1,%r0          ; If low bit is non-zero
        addi    -1,%ret0,%ret0          ;   subtract 1 from bitpos
ffsdone:
        bv,n    0(%r2)
        nop
        .EXIT
        .PROCEND