source: rtems/c/src/exec/score/cpu/mips/cpu_asm.S @ 2e549dad

Last change on this file since 2e549dad was 2e549dad, checked in by Joel Sherrill <joel.sherrill@…>, on 03/14/01 at 00:14:18

2001-03-13 Joel Sherrill <joel@…>

  • cpu.c, cpu_asm.S, iregdef.h, rtems/score/cpu.h, rtems/score/mips.h: Merged MIPS1 and MIPS3 code reducing the number of lines of assembly. Also reimplemented some assembly routines in C further reducing the amount of assembly and increasing maintainability.
  • Property mode set to 100644
File size: 16.7 KB
/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and adding the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *
 *  COPYRIGHT (c) 1989-2000.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 *  $Id$
 */

#include <asm.h>
#include "iregdef.h"
#include "idtcpu.h"

#define EXCP_STACK_SIZE (NREGS*R_SZ)
#define ISR_VEC_SIZE 4

/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 (R4xxx)
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define ADD   dadd
#define STREG sd
#define LDREG ld
#define MFCO  dmfc0
#define MTCO  dmtc0
#define ADDU   addu
#define ADDIU  addiu
#define R_SZ  8
#define F_SZ  8
#define SZ_INT 8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif __mips == 1
/* 32 bit register operations */
#define ADD   add
#define STREG sw
#define LDREG lw
#define MFCO  mfc0
#define MTCO  mtc0
#define ADDU  add
#define ADDIU addi
#define R_SZ  4
#define F_SZ  4
#define SZ_INT 4
#define SZ_INT_POW2 2

#else
#error "mips assembly: what size registers do I deal with?"
#endif

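/*  For illustration: with the definitions above, a single source line
 *  such as
 *
 *      STREG t0,8(a0)
 *
 *  assembles as "sw t0,8(a0)" on a MIPS ISA Level 1 (32 bit) build and as
 *  "sd t0,8(a0)" on a MIPS ISA Level 3 (64 bit) build, which is how the
 *  routines below share one copy of the code across both ISA levels.
 */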

#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12

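/*  For reference, the offsets above imply a layout along these lines.  The
 *  field names and the reg_t type are illustrative only; the authoritative
 *  definition is Context_Control in rtems/score/cpu.h:
 *
 *    typedef struct {
 *      reg_t s0, s1, s2, s3, s4, s5, s6, s7;   callee saved registers
 *      reg_t sp, fp, ra;                       stack/frame ptrs, return address
 *      reg_t c0_sr, c0_epc;                    status register, exception PC
 *    } Context_Control;
 *
 *  Each field is R_SZ bytes wide, so field N lives at byte offset N*R_SZ
 *  from the start of the structure.
 */
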
/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31

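/*  Similarly (again only a sketch, not the actual declaration),
 *  Context_Control_fp in cpu.h amounts to storage for the 32 floating
 *  point registers, each F_SZ bytes wide, so $fN is saved at byte offset
 *  N*F_SZ within the area.
 */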

/*
 *  _CPU_Context_save_fp
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */
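
/*  As a rough sketch of the macro alternative mentioned above (the names
 *  below are assumed for illustration and are not the actual cpu.h
 *  contents), the macro form would look something like:
 *
 *    #define _CPU_Context_save_fp( _fp_context_ptr ) \
 *            _CPU_Context_save_fp_body( *(_fp_context_ptr) )
 *
 *  leaving a routine that receives a (Context_Control_fp *) directly and
 *  never has to dereference the handle in assembly.
 */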

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */

#if ( CPU_HARDWARE_FP == FALSE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noat
        LDREG a1,(a0)
        swc1 $f0,FP0_OFFSET*F_SZ(a1)
        swc1 $f1,FP1_OFFSET*F_SZ(a1)
        swc1 $f2,FP2_OFFSET*F_SZ(a1)
        swc1 $f3,FP3_OFFSET*F_SZ(a1)
        swc1 $f4,FP4_OFFSET*F_SZ(a1)
        swc1 $f5,FP5_OFFSET*F_SZ(a1)
        swc1 $f6,FP6_OFFSET*F_SZ(a1)
        swc1 $f7,FP7_OFFSET*F_SZ(a1)
        swc1 $f8,FP8_OFFSET*F_SZ(a1)
        swc1 $f9,FP9_OFFSET*F_SZ(a1)
        swc1 $f10,FP10_OFFSET*F_SZ(a1)
        swc1 $f11,FP11_OFFSET*F_SZ(a1)
        swc1 $f12,FP12_OFFSET*F_SZ(a1)
        swc1 $f13,FP13_OFFSET*F_SZ(a1)
        swc1 $f14,FP14_OFFSET*F_SZ(a1)
        swc1 $f15,FP15_OFFSET*F_SZ(a1)
        swc1 $f16,FP16_OFFSET*F_SZ(a1)
        swc1 $f17,FP17_OFFSET*F_SZ(a1)
        swc1 $f18,FP18_OFFSET*F_SZ(a1)
        swc1 $f19,FP19_OFFSET*F_SZ(a1)
        swc1 $f20,FP20_OFFSET*F_SZ(a1)
        swc1 $f21,FP21_OFFSET*F_SZ(a1)
        swc1 $f22,FP22_OFFSET*F_SZ(a1)
        swc1 $f23,FP23_OFFSET*F_SZ(a1)
        swc1 $f24,FP24_OFFSET*F_SZ(a1)
        swc1 $f25,FP25_OFFSET*F_SZ(a1)
        swc1 $f26,FP26_OFFSET*F_SZ(a1)
        swc1 $f27,FP27_OFFSET*F_SZ(a1)
        swc1 $f28,FP28_OFFSET*F_SZ(a1)
        swc1 $f29,FP29_OFFSET*F_SZ(a1)
        swc1 $f30,FP30_OFFSET*F_SZ(a1)
        swc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == FALSE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        LDREG a1,(a0)
        lwc1 $f0,FP0_OFFSET*F_SZ(a1)
        lwc1 $f1,FP1_OFFSET*F_SZ(a1)
        lwc1 $f2,FP2_OFFSET*F_SZ(a1)
        lwc1 $f3,FP3_OFFSET*F_SZ(a1)
        lwc1 $f4,FP4_OFFSET*F_SZ(a1)
        lwc1 $f5,FP5_OFFSET*F_SZ(a1)
        lwc1 $f6,FP6_OFFSET*F_SZ(a1)
        lwc1 $f7,FP7_OFFSET*F_SZ(a1)
        lwc1 $f8,FP8_OFFSET*F_SZ(a1)
        lwc1 $f9,FP9_OFFSET*F_SZ(a1)
        lwc1 $f10,FP10_OFFSET*F_SZ(a1)
        lwc1 $f11,FP11_OFFSET*F_SZ(a1)
        lwc1 $f12,FP12_OFFSET*F_SZ(a1)
        lwc1 $f13,FP13_OFFSET*F_SZ(a1)
        lwc1 $f14,FP14_OFFSET*F_SZ(a1)
        lwc1 $f15,FP15_OFFSET*F_SZ(a1)
        lwc1 $f16,FP16_OFFSET*F_SZ(a1)
        lwc1 $f17,FP17_OFFSET*F_SZ(a1)
        lwc1 $f18,FP18_OFFSET*F_SZ(a1)
        lwc1 $f19,FP19_OFFSET*F_SZ(a1)
        lwc1 $f20,FP20_OFFSET*F_SZ(a1)
        lwc1 $f21,FP21_OFFSET*F_SZ(a1)
        lwc1 $f22,FP22_OFFSET*F_SZ(a1)
        lwc1 $f23,FP23_OFFSET*F_SZ(a1)
        lwc1 $f24,FP24_OFFSET*F_SZ(a1)
        lwc1 $f25,FP25_OFFSET*F_SZ(a1)
        lwc1 $f26,FP26_OFFSET*F_SZ(a1)
        lwc1 $f27,FP27_OFFSET*F_SZ(a1)
        lwc1 $f28,FP28_OFFSET*F_SZ(a1)
        lwc1 $f29,FP29_OFFSET*F_SZ(a1)
        lwc1 $f30,FP30_OFFSET*F_SZ(a1)
        lwc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        nop
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */
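
/*  In C-like pseudocode (an informal summary of the code below, not
 *  generated output), the switch amounts to:
 *
 *    run->c0_sr = status register;        then interrupts are masked
 *    save s0-s7, sp, fp, ra and the EPC into *run;
 *    reload s0-s7, sp, fp, ra and the EPC from *heir;
 *    if the heir was saved with interrupts enabled, re-enable them;
 *    return through the heir's saved ra;
 */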

FRAME(_CPU_Context_switch,sp,0,ra)

        mfc0  t0,C0_SR
        li    t1,~(SR_INTERRUPT_ENABLE_BITS)
        STREG t0,C0_SR_OFFSET*R_SZ(a0)   /* save status register */
        and   t0,t1
        mtc0  t0,C0_SR                /* first disable ie bit (recommended) */
#if __mips == 3
        ori t0,SR_EXL|SR_IE   /* enable exception level to disable interrupts */
        mtc0  t0,C0_SR
#endif

        STREG ra,RA_OFFSET*R_SZ(a0)         /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)

        MFC0  t0,C0_EPC
        STREG t0,C0_EPC_OFFSET*R_SZ(a0)

_CPU_Context_switch_restore:
        LDREG s0,S0_OFFSET*R_SZ(a1)           /* restore context */
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG ra,RA_OFFSET*R_SZ(a1)
        LDREG t0,C0_EPC_OFFSET*R_SZ(a1)
        MTC0  t0,C0_EPC
        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

#if __mips == 3
        andi  t0,SR_EXL
        bnez  t0,_CPU_Context_1   /* set exception level from restore context */
        li    t0,~SR_EXL
        mfc0  t1,C0_SR
        nop
        and   t1,t0
        mtc0  t1,C0_SR

#elif __mips == 1
        andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
        beq   t0,$0,_CPU_Context_1          /* set level from restore context */
        mfc0  t0,C0_SR
        nop
        or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
        mtc0  t0,C0_SR                      /* set with enabled */
#endif

_CPU_Context_1:
        j ra
        nop
ENDFRAME(_CPU_Context_switch)

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        ADD a1,a0,zero
        j   _CPU_Context_switch_restore
        nop
ENDFRAME(_CPU_Context_restore)

ASM_EXTERN(_ISR_Nest_level, SZ_INT)
ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
.extern _Thread_Dispatch
.extern _ISR_Vector_table

/*  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler, while the requirements for maintaining the RTEMS
 *  bookkeeping (such as _ISR_Nest_level and
 *  _Thread_Dispatch_disable_level) constrain the rest.
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branch to _ISR_Handler.
 */
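
/*  Purely as a hedged illustration of the "distinct interrupt entry
 *  points" idea described above (this stub is hypothetical and is not
 *  part of this port), such an entry point could look like:
 *
 *    FRAME(_ISR_Handler_vector_3,sp,0,ra)
 *            li      k0,3                    place the vector number in k0
 *            j       _ISR_Handler            common handler does the rest
 *            nop                             branch delay slot
 *    ENDFRAME(_ISR_Handler_vector_3)
 */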

FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU    sp,sp,-EXCP_STACK_SIZE

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
        mflo  k0
        STREG t8, R_T8*R_SZ(sp)
        STREG k0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  k0
        STREG gp, R_GP*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)
        STREG k0, R_MDHI*R_SZ(sp)
        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        /* Q: Why hardcode -40 for stack add??? */
        /* This needs to be figured out.........*/
        ADDIU    sp,sp,-40
        STREG    ra,32(sp)                /* store ra on the stack */

/* determine if an interrupt generated this exception */

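/*  (On MIPS an exception code of zero in the Cause register means the
 *   exception was raised by an interrupt; any other code is a genuine
 *   exception and is routed to _ISR_Handler_Exception below.)
 */
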
        mfc0     k0,C0_CAUSE
        and      k1,k0,CAUSE_EXCMASK
        beq      k1, 0, _ISR_Handler_1
        nop

_ISR_Handler_Exception:
        nop
        b        _ISR_Handler_Exception       /* exception handling not implemented yet; loop here */
        nop

_ISR_Handler_1:

        mfc0     k1,C0_SR
        and      k0,k1
        and      k0,CAUSE_IPMASK
        beq      k0,zero,_ISR_Handler_exit
                /* external interrupt not enabled, ignore */
                /* but if it's not an exception or an interrupt, */
                /* Then where did it come from??? */
        nop

  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */

  /*
   *  _ISR_Nest_level++;
   */
        LDREG  t0,_ISR_Nest_level
        ADD    t0,t0,1
        STREG  t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        ADD    t1,t1,1
        STREG  t1,_Thread_Dispatch_disable_level

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

        jal    mips_vector_isr_handlers
        nop

  /*
   *  --_ISR_Nest_level;
   */
        LDREG  t2,_ISR_Nest_level
        ADD    t2,t2,-1
        STREG  t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        ADD    t1,t1,-1
        STREG  t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        nop
  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        LDREG t0,_Context_Switch_necessary
        LDREG t1,_ISR_Signals_to_thread_executing
        or    t0,t0,t1
        beq   t0,zero,_ISR_Handler_exit
        nop
  /*
   *  call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
   */
        jal _Thread_Dispatch
        nop
  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case)":
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
        LDREG    ra,32(sp)
        ADDIU    sp,sp,40    /* Q: Again with the 40...Is this needed? */

/* restore interrupt context from stack */

        LDREG k0, R_MDLO*R_SZ(sp)
        mtlo  k0
        LDREG k0, R_MDHI*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        mthi  k0
        LDREG a3, R_A3*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)
        .set noat
        LDREG AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE

        MFC0      k0, C0_EPC

        rfe  /* Might not need to do RFE here... */
        j         k0
        nop

        .set    reorder
ENDFRAME(_ISR_Handler)

FRAME(mips_break,sp,0,ra)
#if 1
        break 0x0
        j mips_break
#else
        j ra
#endif
        nop
ENDFRAME(mips_break)
