source: rtems/c/src/lib/libcpu/sh/sh7750/score/cpu_asm.c @ f0c0491f

Last change on this file since f0c0491f was f0c0491f, checked in by Joel Sherrill <joel.sherrill@…>, on Sep 4, 2003 at 5:32:43 PM

2003-09-04 Joel Sherrill <joel@…>

  • score/cpu_asm.c: Removed incorrect statement about copyright assignment.
  • Property mode set to 100644
File size: 7.0 KB
Line 
1/*
2 *  This file contains the basic algorithms for all assembly code used
3 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
4 *  in assembly language
5 *
6 *  NOTE:  This port uses a C file with inline assembler instructions
7 *
8 *  Authors: Ralf Corsepius (corsepiu@faw.uni-ulm.de) and
9 *           Bernd Becker (becker@faw.uni-ulm.de)
10 *
11 *  COPYRIGHT (c) 1997-1998, FAW Ulm, Germany
12 *
13 *  This program is distributed in the hope that it will be useful,
14 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
16 *
17 *
18 *  COPYRIGHT (c) 1998.
19 *  On-Line Applications Research Corporation (OAR).
20 *
21 *  The license and distribution terms for this file may be
22 *  found in the file LICENSE in this distribution or at
23 *  http://www.OARcorp.com/rtems/license.html.
24 *
25 *  $Id$
26 */
27
28/*
29 *  This is supposed to be an assembly file.  This means that system.h
30 *  and cpu.h should not be included in a "real" cpu_asm file.  An
31 *  implementation in assembly should include "cpu_asm.h"
32 */
33
34#include <rtems/system.h>
35#include <rtems/score/cpu.h>
36#include <rtems/score/isr.h>
37#include <rtems/score/thread.h>
38#include <rtems/score/sh.h>
39#include <rtems/score/ispsh7750.h>
40#include <rtems/score/iosh7750.h>
41#include <rtems/score/sh4_regs.h>
42#include <rtems/score/sh_io.h>
43
44/* from cpu_isps.c */
45extern proc_ptr         _Hardware_isr_Table[];
46
47#if( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
48  unsigned long    *_old_stack_ptr;
49#endif
50
51register unsigned long  *stack_ptr asm("r15"); 
52
/*
 *  _CPU_Context_save_fp
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 *
 *  Compiled only when the CPU model has an FPU (SH_HAS_FPU); otherwise
 *  the function body is empty.
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr     /* r4 */
)
{
#if SH_HAS_FPU
 
/*
 *  r4 is loaded with *fp_context_ptr (%0) and then advanced by
 *  sizeof(Context_Control_fp) (%1) so that it points one past the end of
 *  the FP save area; every store below uses @-r4 pre-decrement, filling
 *  the area from the top downward.  FPSCR and FPUL are saved first, then
 *  FPSCR is reloaded (%2 = SH4_FPSCR_SZ) before the dr-register moves —
 *  on SH-4 the SZ bit makes fmov transfer a 64-bit register pair per
 *  instruction.  When SH4_USE_X_REGISTERS is defined, FPSCR is switched
 *  again (%3 = PR|SZ) to reach the xd bank and that bank is saved too.
 *  Finally FPSCR is left as %4 (SH4_FPSCR_PR), the mode the rest of the
 *  system expects.  NOTE(review): save order here must stay the exact
 *  mirror of the restore order in _CPU_Context_restore_fp.
 */
asm volatile("\n\
    mov.l   @%0,r4    \n\
    add     %1,r4\n\
    sts.l   fpscr,@-r4\n\
    sts.l   fpul,@-r4\n\
    lds     %2,fpscr\n\
    fmov    dr14,@-r4\n\
    fmov    dr12,@-r4\n\
    fmov    dr10,@-r4\n\
    fmov    dr8,@-r4\n\
    fmov    dr6,@-r4\n\
    fmov    dr4,@-r4\n\
    fmov    dr2,@-r4\n\
    fmov    dr0,@-r4\n\
    "
#ifdef SH4_USE_X_REGISTERS
    "\
    lds     %3,fpscr\n\
    fmov    xd14,@-r4\n\
    fmov    xd12,@-r4\n\
    fmov    xd10,@-r4\n\
    fmov    xd8,@-r4\n\
    fmov    xd6,@-r4\n\
    fmov    xd4,@-r4\n\
    fmov    xd2,@-r4\n\
    fmov    xd0,@-r4\n\
    " 
#endif
   "lds     %4,fpscr\n\
   "
    : 
    : "r"(fp_context_ptr), "r"(sizeof(Context_Control_fp)), 
      "r"(SH4_FPSCR_SZ), "r"(SH4_FPSCR_PR | SH4_FPSCR_SZ), "r"(SH4_FPSCR_PR)
    : "r4", "r0");

#endif
}
109
/*
 *  _CPU_Context_restore_fp
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 *
 *  Compiled only when the CPU model has an FPU (SH_HAS_FPU); otherwise
 *  the function body is empty.
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr     /* r4 */
)
{
#if SH_HAS_FPU

/*
 *  Exact mirror of _CPU_Context_save_fp: r4 is loaded with
 *  *fp_context_ptr (%0), which points at the BOTTOM of the FP save area,
 *  and every load uses @r4+ post-increment, walking upward in the reverse
 *  of the save order.  When SH4_USE_X_REGISTERS is defined the xd bank
 *  is restored first (FPSCR set to %1 = PR|SZ to reach it), then FPSCR is
 *  set to %2 (SZ) so fmov transfers 64-bit dr pairs, and FPUL and the
 *  task's own saved FPSCR are reloaded last — so the final FPSCR value
 *  comes from the restored context itself, not from a constant.
 */
asm volatile("\n\
    mov.l   @%0,r4    \n\
    "
#ifdef SH4_USE_X_REGISTERS
    "\n\
    lds     %1,fpscr\n\
    fmov    @r4+,xd0\n\
    fmov    @r4+,xd2\n\
    fmov    @r4+,xd4\n\
    fmov    @r4+,xd6\n\
    fmov    @r4+,xd8\n\
    fmov    @r4+,xd10\n\
    fmov    @r4+,xd12\n\
    fmov    @r4+,xd14\n\
    "
#endif
    "\n\
    lds     %2,fpscr\n\
    fmov    @r4+,dr0\n\
    fmov    @r4+,dr2\n\
    fmov    @r4+,dr4\n\
    fmov    @r4+,dr6\n\
    fmov    @r4+,dr8\n\
    fmov    @r4+,dr10\n\
    fmov    @r4+,dr12\n\
    fmov    @r4+,dr14\n\
    lds.l   @r4+,fpul\n\
    lds.l   @r4+,fpscr\n\
    " : 
    : "r"(fp_context_ptr), "r"(SH4_FPSCR_PR | SH4_FPSCR_SZ), "r"(SH4_FPSCR_SZ)
    : "r4", "r0");

#endif
}
163
/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/*  within __CPU_Context_switch:
 *  _CPU_Context_switch
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 * NOTE: It should be safe not to store r4, r5
 *
 * NOTE: It is doubtful if r0 is really needed to be stored
 *
 * NOTE: gbr is added, but should not be necessary, as it is
 *      only used globally in this port.
 */

/*
 * FIXME: This is an ugly hack, but we wanted to avoid recalculating
 *        the offset each time Context_Control is changed
 */
void __CPU_Context_switch(
  Context_Control  *run,        /* r4 */
  Context_Control  *heir        /* r5 */
)
{

/*
 *  The asm below re-defines the global symbol __CPU_Context_switch so the
 *  real entry point is the label inside the asm, not the compiler-emitted
 *  prologue — that is the "ugly hack": the "I" immediate operand lets the
 *  assembler fold in sizeof(Context_Control) without hand-maintaining the
 *  offset.  r4 (run) is advanced to the end of the context area, then all
 *  callee state (sr, gbr, r0-r3, r6-r14, pr, mach, macl, r15) is pushed
 *  with @-r4 pre-decrement.  Finally r5 (heir) is copied into r4 and
 *  execution FALLS THROUGH into __CPU_Context_restore below, which pops
 *  the heir's context in exactly the reverse order.
 */
asm volatile("\n\
        .global __CPU_Context_switch\n\
__CPU_Context_switch:\n\
\n\
        add     %0,r4\n\
  \n\
        stc.l   sr,@-r4\n\
        stc.l   gbr,@-r4\n\
        mov.l   r0,@-r4\n\
        mov.l   r1,@-r4\n\
        mov.l   r2,@-r4\n\
        mov.l   r3,@-r4\n\
\n\
        mov.l   r6,@-r4\n\
        mov.l   r7,@-r4\n\
        mov.l   r8,@-r4\n\
        mov.l   r9,@-r4\n\
        mov.l   r10,@-r4\n\
        mov.l   r11,@-r4\n\
        mov.l   r12,@-r4\n\
        mov.l   r13,@-r4\n\
        mov.l   r14,@-r4\n\
        sts.l   pr,@-r4\n\
        sts.l   mach,@-r4\n\
        sts.l   macl,@-r4\n\
        mov.l   r15,@-r4\n\
\n\
        mov     r5, r4"
  :: "I" (sizeof(Context_Control))
  );

  /*
   *  __CPU_Context_restore is a second global entry point defined inside
   *  this same C function: callers jump here with r4 pointing at the
   *  context to load.  It pops everything saved above in reverse order,
   *  restores sr last (re-establishing the heir's interrupt level), and
   *  returns through the restored pr.  The nop fills the rts delay slot.
   */
  asm volatile("\n\
        .global __CPU_Context_restore\n\
__CPU_Context_restore:\n\
        mov.l   @r4+,r15\n\
        lds.l   @r4+,macl\n\
        lds.l   @r4+,mach\n\
        lds.l   @r4+,pr\n\
        mov.l   @r4+,r14\n\
        mov.l   @r4+,r13\n\
        mov.l   @r4+,r12\n\
        mov.l   @r4+,r11\n\
        mov.l   @r4+,r10\n\
        mov.l   @r4+,r9\n\
        mov.l   @r4+,r8\n\
        mov.l   @r4+,r7\n\
        mov.l   @r4+,r6\n\
\n\
        mov.l   @r4+,r3\n\
        mov.l   @r4+,r2\n\
        mov.l   @r4+,r1\n\
        mov.l   @r4+,r0\n\
        ldc.l   @r4+,gbr\n\
        ldc.l   @r4+,sr\n\
\n\
        rts\n\
        nop" );
}
252
/* 
 *  This routine provides the RTEMS interrupt management.
 *
 *  Called from the low-level vector stubs with the hardware vector
 *  number.  It brackets the user ISR with the RTEMS nest-level and
 *  dispatch-disable bookkeeping, optionally switches to a dedicated
 *  software interrupt stack, and performs a thread dispatch on exit
 *  from the outermost interrupt when one is needed.
 *
 *  NOTE(review): statement order here is interrupt-critical — each
 *  _CPU_ISR_Disable/_CPU_ISR_Enable pair protects the global counters
 *  and the stack switch; do not reorder.
 */
 
void __ISR_Handler( unsigned32 vector)
{
  register unsigned32 level;

  /* Enter a critical section while updating the global ISR state. */
  _CPU_ISR_Disable( level );

  /* No thread dispatch may occur while servicing this interrupt. */
  _Thread_Dispatch_disable_level++;

#if( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  if( _ISR_Nest_level == 0 )
    {
      /* Outermost interrupt: switch r15 to the dedicated interrupt
       * stack, remembering the interrupted thread's stack pointer. */
      _old_stack_ptr = stack_ptr;
      stack_ptr = _CPU_Interrupt_stack_high;
    }

#endif

  _ISR_Nest_level++;

  /* User ISRs run with interrupts enabled (nested interrupts allowed). */
  _CPU_ISR_Enable( level );

  /* call isp — dispatch to the installed handler, if any, for this vector */
  if( _ISR_Vector_table[ vector])
    (*_ISR_Vector_table[ vector ])( vector );

  /* Critical section again to unwind the nest/dispatch bookkeeping. */
  _CPU_ISR_Disable( level );

  _Thread_Dispatch_disable_level--;

  _ISR_Nest_level--;

#if( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)

  if( _ISR_Nest_level == 0 )
    /* restore old stack pointer */
    stack_ptr = _old_stack_ptr; 
#endif

  _CPU_ISR_Enable( level );

  /* Still inside a nested interrupt: let the outer level dispatch. */
  if ( _ISR_Nest_level )
    return;

  /* Dispatching disabled by the interrupted thread: defer, and clear
   * the pending-signal flag so it is not processed out of context. */
  if ( _Thread_Dispatch_disable_level ) {
    _ISR_Signals_to_thread_executing = FALSE;
    return;
  }

  /* Outermost exit with dispatching enabled: run the scheduler if a
   * context switch or a signal to the executing thread is pending. */
  if ( _Context_Switch_necessary || _ISR_Signals_to_thread_executing ) {
    _ISR_Signals_to_thread_executing = FALSE;
    _Thread_Dispatch();
  }
}
Note: See TracBrowser for help on using the repository browser.