source: rtems/cpukit/score/cpu/sparc/cpu.c @ 4924756

Last change on this file since 4924756 was c11ac2d5, checked in by Sebastian Huber <sebastian.huber@…>, on 11/14/16 at 13:42:56

sparc: Use Per_CPU_Control::isr_dispatch_disable

Update #2751.

/**
 *  @file
 *
 *  @brief SPARC CPU Dependent Source
 */

/*
 *  COPYRIGHT (c) 1989-2007.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/system.h>
#include <rtems/score/isr.h>
#include <rtems/score/percpu.h>
#include <rtems/score/tls.h>
#include <rtems/rtems/cache.h>

#if SPARC_HAS_FPU == 1
  RTEMS_STATIC_ASSERT(
    offsetof( Per_CPU_Control, cpu_per_cpu.fsr )
      == SPARC_PER_CPU_FSR_OFFSET,
    SPARC_PER_CPU_FSR_OFFSET
  );
#endif

#define SPARC_ASSERT_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(Context_Control, field) == off ## _OFFSET, \
    Context_Control_offset_ ## field \
  )

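/*
 *  For illustration, the first instantiation below expands to:
 *
 *    RTEMS_STATIC_ASSERT(
 *      offsetof(Context_Control, g5) == G5_OFFSET,
 *      Context_Control_offset_g5
 *    );
 *
 *  so any drift between the offset constants used by the assembly
 *  code and the C structure layout is caught at compile time.
 */
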
SPARC_ASSERT_OFFSET(g5, G5);
SPARC_ASSERT_OFFSET(g7, G7);

RTEMS_STATIC_ASSERT(
  offsetof(Context_Control, l0_and_l1) == L0_OFFSET,
  Context_Control_offset_L0
);

RTEMS_STATIC_ASSERT(
  offsetof(Context_Control, l0_and_l1) + 4 == L1_OFFSET,
  Context_Control_offset_L1
);

SPARC_ASSERT_OFFSET(l2, L2);
SPARC_ASSERT_OFFSET(l3, L3);
SPARC_ASSERT_OFFSET(l4, L4);
SPARC_ASSERT_OFFSET(l5, L5);
SPARC_ASSERT_OFFSET(l6, L6);
SPARC_ASSERT_OFFSET(l7, L7);
SPARC_ASSERT_OFFSET(i0, I0);
SPARC_ASSERT_OFFSET(i1, I1);
SPARC_ASSERT_OFFSET(i2, I2);
SPARC_ASSERT_OFFSET(i3, I3);
SPARC_ASSERT_OFFSET(i4, I4);
SPARC_ASSERT_OFFSET(i5, I5);
SPARC_ASSERT_OFFSET(i6_fp, I6_FP);
SPARC_ASSERT_OFFSET(i7, I7);
SPARC_ASSERT_OFFSET(o6_sp, O6_SP);
SPARC_ASSERT_OFFSET(o7, O7);
SPARC_ASSERT_OFFSET(psr, PSR);
SPARC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE_STACK);

#if defined(RTEMS_SMP)
SPARC_ASSERT_OFFSET(is_executing, SPARC_CONTEXT_CONTROL_IS_EXECUTING);
#endif

#define SPARC_ASSERT_ISF_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(CPU_Interrupt_frame, field) == ISF_ ## off ## _OFFSET, \
    CPU_Interrupt_frame_offset_ ## field \
  )

SPARC_ASSERT_ISF_OFFSET(psr, PSR);
SPARC_ASSERT_ISF_OFFSET(pc, PC);
SPARC_ASSERT_ISF_OFFSET(npc, NPC);
SPARC_ASSERT_ISF_OFFSET(g1, G1);
SPARC_ASSERT_ISF_OFFSET(g2, G2);
SPARC_ASSERT_ISF_OFFSET(g3, G3);
SPARC_ASSERT_ISF_OFFSET(g4, G4);
SPARC_ASSERT_ISF_OFFSET(g5, G5);
SPARC_ASSERT_ISF_OFFSET(g7, G7);
SPARC_ASSERT_ISF_OFFSET(i0, I0);
SPARC_ASSERT_ISF_OFFSET(i1, I1);
SPARC_ASSERT_ISF_OFFSET(i2, I2);
SPARC_ASSERT_ISF_OFFSET(i3, I3);
SPARC_ASSERT_ISF_OFFSET(i4, I4);
SPARC_ASSERT_ISF_OFFSET(i5, I5);
SPARC_ASSERT_ISF_OFFSET(i6_fp, I6_FP);
SPARC_ASSERT_ISF_OFFSET(i7, I7);
SPARC_ASSERT_ISF_OFFSET(y, Y);
SPARC_ASSERT_ISF_OFFSET(tpc, TPC);

RTEMS_STATIC_ASSERT(
  sizeof(SPARC_Minimum_stack_frame) == SPARC_MINIMUM_STACK_FRAME_SIZE,
  SPARC_MINIMUM_STACK_FRAME_SIZE
);

/* https://devel.rtems.org/ticket/2352 */
RTEMS_STATIC_ASSERT(
  sizeof(CPU_Interrupt_frame) % CPU_ALIGNMENT == 0,
  CPU_Interrupt_frame_alignment
);

#if (SPARC_HAS_FPU == 1) && !defined(SPARC_USE_SAFE_FP_SUPPORT)
Context_Control_fp _CPU_Null_fp_context;
#endif

/*
 *  _CPU_Initialize
 *
 *  This routine performs processor dependent initialization.
 *
 *  Input Parameters: NONE
 *
 *  Output Parameters: NONE
 *
 *  NOTE: There is no need to save the pointer to the thread dispatch routine.
 *        The SPARC's assembly code can reference it directly with no problems.
 */

void _CPU_Initialize(void)
{
#if (SPARC_HAS_FPU == 1) && !defined(SPARC_USE_SAFE_FP_SUPPORT)
  Context_Control_fp *pointer;
  uint32_t            psr;

  sparc_get_psr( psr );
  psr |= SPARC_PSR_EF_MASK;
  sparc_set_psr( psr );

  /*
   *  This seems to be the most appropriate way to obtain an initial
   *  FP context on the SPARC.  The NULL fp context is copied to the
   *  task's FP context during Context_Initialize.
   */

  pointer = &_CPU_Null_fp_context;
  _CPU_Context_save_fp( &pointer );
#endif
}

uint32_t   _CPU_ISR_Get_level( void )
{
  uint32_t   level;

  sparc_get_interrupt_level( level );

  return level;
}

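/*
 *  The level returned above is the PIL field of the PSR: 0 means all
 *  maskable interrupt requests are enabled, while 15 masks interrupt
 *  levels 1 through 14 (level 15 is non-maskable on SPARC V8).  A
 *  minimal usage sketch:
 *
 *    if ( _CPU_ISR_Get_level() == 0 ) {
 *      (all interrupt levels are currently enabled)
 *    }
 */
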
/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs the specified handler as a "raw" non-executive
 *  supported trap handler (a.k.a. interrupt service routine).
 *
 *  Input Parameters:
 *    vector      - trap table entry number plus synchronous
 *                    vs. asynchronous information
 *    new_handler - address of the handler to be installed
 *    old_handler - pointer to an address of the handler previously installed
 *
 *  Output Parameters:
 *    *old_handler - address of the handler previously installed
 *
 *  NOTE:
 *
 *  On the SPARC, there are really only 256 vectors.  However, the executive
 *  has no easy, fast, reliable way to determine which traps are synchronous
 *  and which are asynchronous.  By default, synchronous traps return to the
 *  instruction which caused the trap.  So if you install a software
 *  trap handler as an executive interrupt handler (which is desirable since
 *  RTEMS takes care of window and register issues), then the executive needs
 *  to know that the return address is that of the trapping instruction
 *  rather than the instruction following the trap.
 *
 *  So vectors 0 through 255 are treated as regular asynchronous traps which
 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
 *  by the executive to be synchronous and to require that the return address
 *  be fudged.
 *
 *  If you use this mechanism to install a trap handler which must reexecute
 *  the instruction which caused the trap, then it should be installed as
 *  an asynchronous trap.  This will avoid the executive changing the return
 *  address.
 */

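/*
 *  For example (assuming the vector encoding macros from this port's
 *  cpu.h): the data access exception is trap 0x09 and is synchronous,
 *  so it would be installed with vector
 *
 *    SPARC_SYNCHRONOUS_TRAP( 0x09 )   == 0x109
 *
 *  while an external interrupt such as trap 0x14 would use
 *
 *    SPARC_ASYNCHRONOUS_TRAP( 0x14 )  == 0x14
 *
 *  In both cases SPARC_REAL_TRAP_NUMBER() recovers the hardware trap
 *  number, 0x09 and 0x14 respectively.
 */
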
void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
)
{
  uint32_t               real_vector;
  CPU_Trap_table_entry  *tbr;
  CPU_Trap_table_entry  *slot;
  uint32_t               u32_tbr;
  uint32_t               u32_handler;

  /*
   *  Get the "real" trap number for this vector ignoring the synchronous
   *  versus asynchronous indicator included with our vector numbers.
   */

  real_vector = SPARC_REAL_TRAP_NUMBER( vector );

  /*
   *  Get the current base address of the trap table and calculate a pointer
   *  to the slot we are interested in.
   */

  sparc_get_tbr( u32_tbr );

  u32_tbr &= 0xfffff000;

  tbr = (CPU_Trap_table_entry *) u32_tbr;

  slot = &tbr[ real_vector ];

  /*
   *  Get the address of the old_handler from the trap table.
   *
   *  NOTE: The old_handler returned will be bogus if it does not follow
   *        the RTEMS model.
   */

#define HIGH_BITS_MASK   0xFFFFFC00
#define HIGH_BITS_SHIFT  10
#define LOW_BITS_MASK    0x000003FF

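  /*
   *  These masks follow the sethi/jmp instruction pair used in the trap
   *  slot template: sethi loads the upper 22 bits of a word and the jmp
   *  immediate supplies the low 10 bits.  A worked example, for a
   *  hypothetical handler at 0x40001234:
   *
   *    high bits: (0x40001234 & HIGH_BITS_MASK) >> 10 == 0x100004
   *    low bits:   0x40001234 & LOW_BITS_MASK         == 0x234
   *
   *  Recombining (0x100004 << 10) | 0x234 yields 0x40001234 again,
   *  which is how the old handler address is reconstructed below.
   */
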
  if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) {
    u32_handler =
      (slot->sethi_of_handler_to_l4 << HIGH_BITS_SHIFT) |
      (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK);
    *old_handler = (proc_ptr) u32_handler;
  } else {
    *old_handler = 0;
  }

  /*
   *  Copy the template to the slot and then fix it.
   */

  *slot = _CPU_Trap_slot_template;

  u32_handler = (uint32_t) new_handler;

  slot->mov_vector_l3 |= vector;
  slot->sethi_of_handler_to_l4 |=
    (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT;
  slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK);

  /*
   * There is no instruction cache snooping, so we need to invalidate
   * the instruction cache to make sure that the processor sees the
   * changes to the trap table. This step is required on both single-
   * and multiprocessor systems.
   *
   * In an SMP configuration a change to the trap table might be
   * missed by other cores. If the system state is up, the other
   * cores can be notified using SMP messages that they need to
   * flush their icache. If the up state has not been reached
   * there is no need to notify other cores. They will do an
   * automatic flush of the icache just after entering the up
   * state, but before enabling interrupts.
   */
  rtems_cache_invalidate_entire_instruction();
}

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
)
{
   uint32_t   real_vector;
   proc_ptr   ignored;

   /*
    *  Get the "real" trap number for this vector ignoring the synchronous
    *  versus asynchronous indicator included with our vector numbers.
    */

   real_vector = SPARC_REAL_TRAP_NUMBER( vector );

   /*
    *  Return the previous ISR handler.
    */

   *old_handler = _ISR_Vector_table[ real_vector ];

   /*
    *  Install the wrapper so this ISR can be invoked properly.
    */

   _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );

   /*
    *  We put the actual user ISR address in '_ISR_Vector_table'.  This will
    *  be used by the _ISR_Handler so the user gets control.
    */

   _ISR_Vector_table[ real_vector ] = new_handler;
}

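/*
 *  A minimal usage sketch (my_isr is a hypothetical handler with the
 *  proc_ptr signature); the previously installed handler is returned
 *  through the third argument:
 *
 *    proc_ptr previous;
 *
 *    _CPU_ISR_install_vector(
 *      SPARC_ASYNCHRONOUS_TRAP( 0x14 ),
 *      my_isr,
 *      &previous
 *    );
 */
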
void _CPU_Context_Initialize(
  Context_Control  *the_context,
  uint32_t         *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
)
{
    uint32_t     stack_high;  /* highest "stack aligned" address */
    uint32_t     tmp_psr;

    /*
     *  On CPUs with stacks which grow down (e.g. the SPARC), we build the
     *  stack based on the stack_high address.
     */

    stack_high = ((uint32_t)(stack_base) + size);
    stack_high &= ~(CPU_STACK_ALIGNMENT - 1);
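
    /*
     *  For example, if CPU_STACK_ALIGNMENT is 16, the mask above is
     *  0xFFFFFFF0 and an address such as 0x40001004 rounds down to
     *  0x40001000.  Rounding down keeps the aligned address within
     *  the allocated stack area.
     */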

    /*
     *  See the README in this directory for a diagram of the stack.
     */

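    /*
     *  The SPARC return idiom jumps to the saved return address plus 8
     *  (skipping the call instruction and its delay slot), so seeding
     *  o7 with entry_point - 8 makes the initial "return" land exactly
     *  on the entry point.
     */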
    the_context->o7    = ((uint32_t) entry_point) - 8;
    the_context->o6_sp = stack_high - SPARC_MINIMUM_STACK_FRAME_SIZE;
    the_context->i6_fp = 0;

    /*
     *  Build the PSR for the task.  Almost everything can be 0 and the
     *  CWP is corrected during the context switch.
     *
     *  The EF bit determines if the floating point unit is available.
     *  The FPU is ONLY enabled if the context is associated with an FP task
     *  and this SPARC model has an FPU.
     */

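    /*
     *  The PIL field occupies bits 8 through 11 of the SPARC V8 PSR,
     *  hence the shift by 8 below.  For example, new_level == 15
     *  yields a PIL field of 0x0F00, which masks interrupt levels
     *  1 through 14.
     */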
    sparc_get_psr( tmp_psr );
    tmp_psr &= ~SPARC_PSR_PIL_MASK;
    tmp_psr |= (new_level << 8) & SPARC_PSR_PIL_MASK;
    tmp_psr &= ~SPARC_PSR_EF_MASK;      /* disabled by default */

    /* _CPU_Context_restore_heir() relies on this */
    _Assert( ( tmp_psr & SPARC_PSR_ET_MASK ) != 0 );

#if (SPARC_HAS_FPU == 1)
    /*
     *  If this bit is not set, then a task gets a fault when it accesses
     *  a floating point register.  This is a nice way to detect floating
     *  point tasks which are not currently declared as such.
     */

    if ( is_fp )
      tmp_psr |= SPARC_PSR_EF_MASK;
#endif
    the_context->psr = tmp_psr;

    /*
     *  Since THIS thread is being created, there is no way that THIS
     *  thread can have an _ISR_Dispatch stack frame on its stack.
     */
    the_context->isr_dispatch_disable = 0;

    if ( tls_area != NULL ) {
      void *tcb = _TLS_TCB_after_TLS_block_initialize( tls_area );

      the_context->g7 = (uintptr_t) tcb;
    }
}