source: rtems/cpukit/score/cpu/sparc/cpu.c @ 2764bd43

4.115
Last change on this file since 2764bd43 was 2764bd43, checked in by Alexander Krutwig <alexander.krutwig@…>, on 05/29/15 at 13:54:27

sparc: Disable FPU in interrupt context

Update #2270.

  • Property mode set to 100644
File size: 10.6 KB
Line 
1/**
2 *  @file
3 *
4 *  @brief SPARC CPU Dependent Source
5 */
6
7/*
8 *  COPYRIGHT (c) 1989-2007.
9 *  On-Line Applications Research Corporation (OAR).
10 *
11 *  The license and distribution terms for this file may be
12 *  found in the file LICENSE in this distribution or at
13 *  http://www.rtems.org/license/LICENSE.
14 */
15
16#ifdef HAVE_CONFIG_H
17#include "config.h"
18#endif
19
20#include <rtems/system.h>
21#include <rtems/score/isr.h>
22#include <rtems/score/percpu.h>
23#include <rtems/score/tls.h>
24#include <rtems/rtems/cache.h>
25
/*
 * The SPARC assembly code (window handling and interrupt dispatch) indexes
 * into the per-CPU control with these hard-coded byte offsets, so verify at
 * compile time that the C structure layout actually matches them.
 */
RTEMS_STATIC_ASSERT(
  offsetof( Per_CPU_Control, cpu_per_cpu.isr_dispatch_disable)
    == SPARC_PER_CPU_ISR_DISPATCH_DISABLE,
  SPARC_PER_CPU_ISR_DISPATCH_DISABLE
);

#if SPARC_HAS_FPU == 1
  /* FSR save area used when the FPU is disabled in interrupt context. */
  RTEMS_STATIC_ASSERT(
    offsetof( Per_CPU_Control, cpu_per_cpu.fsr)
      == SPARC_PER_CPU_FSR_OFFSET,
    SPARC_PER_CPU_FSR_OFFSET
  );
#endif
39
/*
 * Verify at compile time that every Context_Control member is at the byte
 * offset the context switch assembly code expects (<field>_OFFSET).
 */
#define SPARC_ASSERT_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(Context_Control, field) == off ## _OFFSET, \
    Context_Control_offset_ ## field \
  )

SPARC_ASSERT_OFFSET(g5, G5);
SPARC_ASSERT_OFFSET(g7, G7);

/*
 * l0 and l1 are stored together in one double-word member, so the two
 * halves are checked individually (l1 lives 4 bytes past l0).
 */
RTEMS_STATIC_ASSERT(
  offsetof(Context_Control, l0_and_l1) == L0_OFFSET,
  Context_Control_offset_L0
);

RTEMS_STATIC_ASSERT(
  offsetof(Context_Control, l0_and_l1) + 4 == L1_OFFSET,
  Context_Control_offset_L1
);

SPARC_ASSERT_OFFSET(l2, L2);
SPARC_ASSERT_OFFSET(l3, L3);
SPARC_ASSERT_OFFSET(l4, L4);
SPARC_ASSERT_OFFSET(l5, L5);
SPARC_ASSERT_OFFSET(l6, L6);
SPARC_ASSERT_OFFSET(l7, L7);
SPARC_ASSERT_OFFSET(i0, I0);
SPARC_ASSERT_OFFSET(i1, I1);
SPARC_ASSERT_OFFSET(i2, I2);
SPARC_ASSERT_OFFSET(i3, I3);
SPARC_ASSERT_OFFSET(i4, I4);
SPARC_ASSERT_OFFSET(i5, I5);
SPARC_ASSERT_OFFSET(i6_fp, I6_FP);
SPARC_ASSERT_OFFSET(i7, I7);
SPARC_ASSERT_OFFSET(o6_sp, O6_SP);
SPARC_ASSERT_OFFSET(o7, O7);
SPARC_ASSERT_OFFSET(psr, PSR);
SPARC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE_STACK);

#if defined(RTEMS_SMP)
SPARC_ASSERT_OFFSET(is_executing, SPARC_CONTEXT_CONTROL_IS_EXECUTING);
#endif
81
/*
 * Verify at compile time that every CPU_Interrupt_frame member is at the
 * byte offset the interrupt dispatch assembly code expects
 * (ISF_<field>_OFFSET).
 */
#define SPARC_ASSERT_ISF_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(CPU_Interrupt_frame, field) == ISF_ ## off ## _OFFSET, \
    CPU_Interrupt_frame_offset_ ## field \
  )

SPARC_ASSERT_ISF_OFFSET(psr, PSR);
SPARC_ASSERT_ISF_OFFSET(pc, PC);
SPARC_ASSERT_ISF_OFFSET(npc, NPC);
SPARC_ASSERT_ISF_OFFSET(g1, G1);
SPARC_ASSERT_ISF_OFFSET(g2, G2);
SPARC_ASSERT_ISF_OFFSET(g3, G3);
SPARC_ASSERT_ISF_OFFSET(g4, G4);
SPARC_ASSERT_ISF_OFFSET(g5, G5);
SPARC_ASSERT_ISF_OFFSET(g7, G7);
SPARC_ASSERT_ISF_OFFSET(i0, I0);
SPARC_ASSERT_ISF_OFFSET(i1, I1);
SPARC_ASSERT_ISF_OFFSET(i2, I2);
SPARC_ASSERT_ISF_OFFSET(i3, I3);
SPARC_ASSERT_ISF_OFFSET(i4, I4);
SPARC_ASSERT_ISF_OFFSET(i5, I5);
SPARC_ASSERT_ISF_OFFSET(i6_fp, I6_FP);
SPARC_ASSERT_ISF_OFFSET(i7, I7);
SPARC_ASSERT_ISF_OFFSET(y, Y);
SPARC_ASSERT_ISF_OFFSET(tpc, TPC);

/* The assembly code reserves exactly this much stack space per frame. */
RTEMS_STATIC_ASSERT(
  sizeof(CPU_Interrupt_frame) == CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE,
  CPU_Interrupt_frame_size
);

/* https://devel.rtems.org/ticket/2352 */
RTEMS_STATIC_ASSERT(
  sizeof(CPU_Interrupt_frame) % CPU_ALIGNMENT == 0,
  CPU_Interrupt_frame_alignment
);
118
119/*
120 *  _CPU_Initialize
121 *
122 *  This routine performs processor dependent initialization.
123 *
124 *  INPUT PARAMETERS: NONE
125 *
126 *  Output Parameters: NONE
127 *
128 *  NOTE: There is no need to save the pointer to the thread dispatch routine.
129 *        The SPARC's assembly code can reference it directly with no problems.
130 */
131
/*
 *  _CPU_Initialize
 *
 *  Performs SPARC specific initialization.  The only processor dependent
 *  work is capturing a pristine floating point context for later use when
 *  creating floating point tasks.
 */
void _CPU_Initialize(void)
{
#if (SPARC_HAS_FPU == 1)
  Context_Control_fp *fp_context;
  uint32_t            status;

  /* Enable the FPU so the save below does not trap. */
  sparc_get_psr( status );
  status |= SPARC_PSR_EF_MASK;
  sparc_set_psr( status );

  /*
   *  Saving the current (never used) FP state seems to be the most
   *  appropriate way to obtain an initial FP context on the SPARC.  The
   *  NULL fp context is copied to the task's FP context during
   *  Context_Initialize.
   */
  fp_context = &_CPU_Null_fp_context;
  _CPU_Context_save_fp( &fp_context );
#endif
}
152
/*
 *  _CPU_ISR_Get_level
 *
 *  Returns the current interrupt level (the PIL field of the PSR).
 */
uint32_t   _CPU_ISR_Get_level( void )
{
  uint32_t   pil;

  sparc_get_interrupt_level( pil );

  return pil;
}
161
162/*
163 *  _CPU_ISR_install_raw_handler
164 *
165 *  This routine installs the specified handler as a "raw" non-executive
166 *  supported trap handler (a.k.a. interrupt service routine).
167 *
168 *  Input Parameters:
169 *    vector      - trap table entry number plus synchronous
170 *                    vs. asynchronous information
171 *    new_handler - address of the handler to be installed
172 *    old_handler - pointer to an address of the handler previously installed
173 *
174 *  Output Parameters: NONE
175 *    *new_handler - address of the handler previously installed
176 *
177 *  NOTE:
178 *
179 *  On the SPARC, there are really only 256 vectors.  However, the executive
180 *  has no easy, fast, reliable way to determine which traps are synchronous
181 *  and which are asynchronous.  By default, synchronous traps return to the
182 *  instruction which caused the interrupt.  So if you install a software
183 *  trap handler as an executive interrupt handler (which is desirable since
184 *  RTEMS takes care of window and register issues), then the executive needs
185 *  to know that the return address is to the trap rather than the instruction
186 *  following the trap.
187 *
 *  So vectors 0 through 255 are treated as regular asynchronous traps which
 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
 *  by the executive to be synchronous and to require that the return address
 *  be fudged.
192 *
193 *  If you use this mechanism to install a trap handler which must reexecute
194 *  the instruction which caused the trap, then it should be installed as
195 *  an asynchronous trap.  This will avoid the executive changing the return
196 *  address.
197 */
198
199void _CPU_ISR_install_raw_handler(
200  uint32_t    vector,
201  proc_ptr    new_handler,
202  proc_ptr   *old_handler
203)
204{
205  uint32_t               real_vector;
206  CPU_Trap_table_entry  *tbr;
207  CPU_Trap_table_entry  *slot;
208  uint32_t               u32_tbr;
209  uint32_t               u32_handler;
210
211  /*
212   *  Get the "real" trap number for this vector ignoring the synchronous
213   *  versus asynchronous indicator included with our vector numbers.
214   */
215
216  real_vector = SPARC_REAL_TRAP_NUMBER( vector );
217
218  /*
219   *  Get the current base address of the trap table and calculate a pointer
220   *  to the slot we are interested in.
221   */
222
223  sparc_get_tbr( u32_tbr );
224
225  u32_tbr &= 0xfffff000;
226
227  tbr = (CPU_Trap_table_entry *) u32_tbr;
228
229  slot = &tbr[ real_vector ];
230
231  /*
232   *  Get the address of the old_handler from the trap table.
233   *
234   *  NOTE: The old_handler returned will be bogus if it does not follow
235   *        the RTEMS model.
236   */
237
238#define HIGH_BITS_MASK   0xFFFFFC00
239#define HIGH_BITS_SHIFT  10
240#define LOW_BITS_MASK    0x000003FF
241
242  if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) {
243    u32_handler =
244      (slot->sethi_of_handler_to_l4 << HIGH_BITS_SHIFT) |
245      (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK);
246    *old_handler = (proc_ptr) u32_handler;
247  } else
248    *old_handler = 0;
249
250  /*
251   *  Copy the template to the slot and then fix it.
252   */
253
254  *slot = _CPU_Trap_slot_template;
255
256  u32_handler = (uint32_t) new_handler;
257
258  slot->mov_vector_l3 |= vector;
259  slot->sethi_of_handler_to_l4 |=
260    (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT;
261  slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK);
262
263  /*
264   * There is no instruction cache snooping, so we need to invalidate
265   * the instruction cache to make sure that the processor sees the
266   * changes to the trap table. This step is required on both single-
267   * and multiprocessor systems.
268   *
269   * In a SMP configuration a change to the trap table might be
270   * missed by other cores. If the system state is up, the other
271   * cores can be notified using SMP messages that they need to
272   * flush their icache. If the up state has not been reached
273   * there is no need to notify other cores. They will do an
274   * automatic flush of the icache just after entering the up
275   * state, but before enabling interrupts.
276   */
277  rtems_cache_invalidate_entire_instruction();
278}
279
280void _CPU_ISR_install_vector(
281  uint32_t    vector,
282  proc_ptr    new_handler,
283  proc_ptr   *old_handler
284)
285{
286   uint32_t   real_vector;
287   proc_ptr   ignored;
288
289  /*
290   *  Get the "real" trap number for this vector ignoring the synchronous
291   *  versus asynchronous indicator included with our vector numbers.
292   */
293
294   real_vector = SPARC_REAL_TRAP_NUMBER( vector );
295
296   /*
297    *  Return the previous ISR handler.
298    */
299
300   *old_handler = _ISR_Vector_table[ real_vector ];
301
302   /*
303    *  Install the wrapper so this ISR can be invoked properly.
304    */
305
306   _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
307
308   /*
309    *  We put the actual user ISR address in '_ISR_vector_table'.  This will
310    *  be used by the _ISR_Handler so the user gets control.
311    */
312
313    _ISR_Vector_table[ real_vector ] = new_handler;
314}
315
316void _CPU_Context_Initialize(
317  Context_Control  *the_context,
318  uint32_t         *stack_base,
319  uint32_t          size,
320  uint32_t          new_level,
321  void             *entry_point,
322  bool              is_fp,
323  void             *tls_area
324)
325{
326    uint32_t     stack_high;  /* highest "stack aligned" address */
327    uint32_t     tmp_psr;
328
329    /*
330     *  On CPUs with stacks which grow down (i.e. SPARC), we build the stack
331     *  based on the stack_high address.
332     */
333
334    stack_high = ((uint32_t)(stack_base) + size);
335    stack_high &= ~(CPU_STACK_ALIGNMENT - 1);
336
337    /*
338     *  See the README in this directory for a diagram of the stack.
339     */
340
341    the_context->o7    = ((uint32_t) entry_point) - 8;
342    the_context->o6_sp = stack_high - CPU_MINIMUM_STACK_FRAME_SIZE;
343    the_context->i6_fp = 0;
344
345    /*
346     *  Build the PSR for the task.  Most everything can be 0 and the
347     *  CWP is corrected during the context switch.
348     *
349     *  The EF bit determines if the floating point unit is available.
350     *  The FPU is ONLY enabled if the context is associated with an FP task
351     *  and this SPARC model has an FPU.
352     */
353
354    sparc_get_psr( tmp_psr );
355    tmp_psr &= ~SPARC_PSR_PIL_MASK;
356    tmp_psr |= (new_level << 8) & SPARC_PSR_PIL_MASK;
357    tmp_psr &= ~SPARC_PSR_EF_MASK;      /* disabled by default */
358
359#if (SPARC_HAS_FPU == 1)
360    /*
361     *  If this bit is not set, then a task gets a fault when it accesses
362     *  a floating point register.  This is a nice way to detect floating
363     *  point tasks which are not currently declared as such.
364     */
365
366    if ( is_fp )
367      tmp_psr |= SPARC_PSR_EF_MASK;
368#endif
369    the_context->psr = tmp_psr;
370
371  /*
372   *  Since THIS thread is being created, there is no way that THIS
373   *  thread can have an _ISR_Dispatch stack frame on its stack.
374   */
375    the_context->isr_dispatch_disable = 0;
376
377  if ( tls_area != NULL ) {
378    void *tcb = _TLS_TCB_after_TLS_block_initialize( tls_area );
379
380    the_context->g7 = (uintptr_t) tcb;
381  }
382}
Note: See TracBrowser for help on using the repository browser.