source: rtems/cpukit/score/cpu/sparc/cpu.c @ 9165349d

Last change on this file since 9165349d was 3fe2155, checked in by Sebastian Huber <sebastian.huber@…>, on 02/01/19 at 09:00:36

Remove superfluous <rtems/system.h> includes

/**
 *  @file
 *
 *  @brief SPARC CPU Dependent Source
 */

/*
 *  COPYRIGHT (c) 1989-2007.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2017 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/isr.h>
#include <rtems/score/percpu.h>
#include <rtems/score/tls.h>
#include <rtems/score/thread.h>
#include <rtems/rtems/cache.h>
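
/*
 * Editorial note on the compile-time asserts below: the offsets they check
 * are also encoded as constants consumed by the assembly language parts of
 * this port, so the asserts keep the C structure layouts and those assembly
 * constants in sync.
 */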
#if SPARC_HAS_FPU == 1
  RTEMS_STATIC_ASSERT(
    offsetof( Per_CPU_Control, cpu_per_cpu.fsr)
      == SPARC_PER_CPU_FSR_OFFSET,
    SPARC_PER_CPU_FSR_OFFSET
  );

  #if defined(SPARC_USE_LAZY_FP_SWITCH)
    RTEMS_STATIC_ASSERT(
      offsetof( Per_CPU_Control, cpu_per_cpu.fp_owner)
        == SPARC_PER_CPU_FP_OWNER_OFFSET,
      SPARC_PER_CPU_FP_OWNER_OFFSET
    );
  #endif
#endif

#define SPARC_ASSERT_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(Context_Control, field) == off ## _OFFSET, \
    Context_Control_offset_ ## field \
  )

SPARC_ASSERT_OFFSET(g5, G5);
SPARC_ASSERT_OFFSET(g7, G7);

RTEMS_STATIC_ASSERT(
  offsetof(Context_Control, l0_and_l1) == L0_OFFSET,
  Context_Control_offset_L0
);

RTEMS_STATIC_ASSERT(
  offsetof(Context_Control, l0_and_l1) + 4 == L1_OFFSET,
  Context_Control_offset_L1
);

SPARC_ASSERT_OFFSET(l2, L2);
SPARC_ASSERT_OFFSET(l3, L3);
SPARC_ASSERT_OFFSET(l4, L4);
SPARC_ASSERT_OFFSET(l5, L5);
SPARC_ASSERT_OFFSET(l6, L6);
SPARC_ASSERT_OFFSET(l7, L7);
SPARC_ASSERT_OFFSET(i0, I0);
SPARC_ASSERT_OFFSET(i1, I1);
SPARC_ASSERT_OFFSET(i2, I2);
SPARC_ASSERT_OFFSET(i3, I3);
SPARC_ASSERT_OFFSET(i4, I4);
SPARC_ASSERT_OFFSET(i5, I5);
SPARC_ASSERT_OFFSET(i6_fp, I6_FP);
SPARC_ASSERT_OFFSET(i7, I7);
SPARC_ASSERT_OFFSET(o6_sp, O6_SP);
SPARC_ASSERT_OFFSET(o7, O7);
SPARC_ASSERT_OFFSET(psr, PSR);
SPARC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE_STACK);

#if defined(RTEMS_SMP)
SPARC_ASSERT_OFFSET(is_executing, SPARC_CONTEXT_CONTROL_IS_EXECUTING);
#endif

#define SPARC_ASSERT_ISF_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(CPU_Interrupt_frame, field) == ISF_ ## off ## _OFFSET, \
    CPU_Interrupt_frame_offset_ ## field \
  )

SPARC_ASSERT_ISF_OFFSET(psr, PSR);
SPARC_ASSERT_ISF_OFFSET(pc, PC);
SPARC_ASSERT_ISF_OFFSET(npc, NPC);
SPARC_ASSERT_ISF_OFFSET(g1, G1);
SPARC_ASSERT_ISF_OFFSET(g2, G2);
SPARC_ASSERT_ISF_OFFSET(g3, G3);
SPARC_ASSERT_ISF_OFFSET(g4, G4);
SPARC_ASSERT_ISF_OFFSET(g5, G5);
SPARC_ASSERT_ISF_OFFSET(g7, G7);
SPARC_ASSERT_ISF_OFFSET(i0, I0);
SPARC_ASSERT_ISF_OFFSET(i1, I1);
SPARC_ASSERT_ISF_OFFSET(i2, I2);
SPARC_ASSERT_ISF_OFFSET(i3, I3);
SPARC_ASSERT_ISF_OFFSET(i4, I4);
SPARC_ASSERT_ISF_OFFSET(i5, I5);
SPARC_ASSERT_ISF_OFFSET(i6_fp, I6_FP);
SPARC_ASSERT_ISF_OFFSET(i7, I7);
SPARC_ASSERT_ISF_OFFSET(y, Y);
SPARC_ASSERT_ISF_OFFSET(tpc, TPC);

#define SPARC_ASSERT_FP_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(Context_Control_fp, field) == SPARC_FP_CONTEXT_OFFSET_ ## off, \
    Context_Control_fp_offset_ ## field \
  )

SPARC_ASSERT_FP_OFFSET(f0_f1, F0_F1);
SPARC_ASSERT_FP_OFFSET(f2_f3, F2_F3);
SPARC_ASSERT_FP_OFFSET(f4_f5, F4_F5);
SPARC_ASSERT_FP_OFFSET(f6_f7, F6_F7);
SPARC_ASSERT_FP_OFFSET(f8_f9, F8_F9);
SPARC_ASSERT_FP_OFFSET(f10_f11, F10_F11);
SPARC_ASSERT_FP_OFFSET(f12_f13, F12_F13);
SPARC_ASSERT_FP_OFFSET(f14_f15, F14_F15);
SPARC_ASSERT_FP_OFFSET(f16_f17, F16_F17);
SPARC_ASSERT_FP_OFFSET(f18_f19, F18_F19);
SPARC_ASSERT_FP_OFFSET(f20_f21, F20_F21);
SPARC_ASSERT_FP_OFFSET(f22_f23, F22_F23);
SPARC_ASSERT_FP_OFFSET(f24_f25, F24_F25);
SPARC_ASSERT_FP_OFFSET(f26_f27, F26_F27);
SPARC_ASSERT_FP_OFFSET(f28_f29, F28_F29);
SPARC_ASSERT_FP_OFFSET(f30_f31, F30_F31);
SPARC_ASSERT_FP_OFFSET(fsr, FSR);

RTEMS_STATIC_ASSERT(
  sizeof(SPARC_Minimum_stack_frame) == SPARC_MINIMUM_STACK_FRAME_SIZE,
  SPARC_MINIMUM_STACK_FRAME_SIZE
);

/* https://devel.rtems.org/ticket/2352 */
RTEMS_STATIC_ASSERT(
  sizeof(CPU_Interrupt_frame) % CPU_ALIGNMENT == 0,
  CPU_Interrupt_frame_alignment
);

/*
 *  This initializes the set of opcodes placed in each trap
 *  table entry.  The routine which installs a handler is responsible
 *  for filling in the fields for the _handler address and the _vector
 *  trap type.
 *
 *  The constants following this structure are masks for the fields which
 *  must be filled in when the handler is installed.
 */
const CPU_Trap_table_entry _CPU_Trap_slot_template = {
  0xa1480000,      /* mov   %psr, %l0           */
  0x29000000,      /* sethi %hi(_handler), %l4  */
  0x81c52000,      /* jmp   %l4 + %lo(_handler) */
  0xa6102000       /* mov   _vector, %l3        */
};
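
/*
 * Illustration only, not part of the upstream file: a sketch of how the
 * template above is specialized for a concrete handler, mirroring what
 * _CPU_ISR_install_raw_handler() does further below.  sethi loads the
 * upper 22 bits of a word into %l4 and the jmp displacement supplies the
 * low 10 bits, so together they can materialize any 32-bit handler
 * address.  The function name is hypothetical.
 */
#if 0
static CPU_Trap_table_entry _Sketch_encode_trap_slot(
  uint32_t vector,
  uint32_t handler
)
{
  CPU_Trap_table_entry slot = _CPU_Trap_slot_template;

  slot.mov_vector_l3 |= vector;                           /* trap type     */
  slot.sethi_of_handler_to_l4 |= handler >> 10;           /* upper 22 bits */
  slot.jmp_to_low_of_handler_plus_l4 |= handler & 0x3FF;  /* low 10 bits   */

  return slot;
}
#endif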

/*
 *  _CPU_Initialize
 *
 *  This routine performs processor dependent initialization.
 *
 *  Input Parameters: NONE
 *
 *  Output Parameters: NONE
 *
 *  NOTE: There is no need to save the pointer to the thread dispatch routine.
 *        The SPARC's assembly code can reference it directly with no problems.
 */

void _CPU_Initialize(void)
{
#if defined(SPARC_USE_LAZY_FP_SWITCH)
  __asm__ volatile (
    ".global SPARC_THREAD_CONTROL_REGISTERS_FP_CONTEXT_OFFSET\n"
    ".set SPARC_THREAD_CONTROL_REGISTERS_FP_CONTEXT_OFFSET, %0\n"
    ".global SPARC_THREAD_CONTROL_FP_CONTEXT_OFFSET\n"
    ".set SPARC_THREAD_CONTROL_FP_CONTEXT_OFFSET, %1\n"
    :
    : "i" (offsetof(Thread_Control, Registers.fp_context)),
      "i" (offsetof(Thread_Control, fp_context))
  );
#endif
}

uint32_t _CPU_ISR_Get_level( void )
{
  uint32_t level;

  sparc_get_interrupt_level( level );

  return level;
}

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs the specified handler as a "raw" non-executive
 *  supported trap handler (a.k.a. interrupt service routine).
 *
 *  Input Parameters:
 *    vector      - trap table entry number plus synchronous
 *                    vs. asynchronous information
 *    new_handler - address of the handler to be installed
 *    old_handler - pointer to a location in which to return the
 *                    previously installed handler
 *
 *  Output Parameters:
 *    *old_handler - address of the handler previously installed
 *
 *  NOTE:
 *
 *  On the SPARC, there are really only 256 vectors.  However, the executive
 *  has no easy, fast, reliable way to determine which traps are synchronous
 *  and which are asynchronous.  By default, synchronous traps return to the
 *  instruction which caused the interrupt.  So if you install a software
 *  trap handler as an executive interrupt handler (which is desirable since
 *  RTEMS takes care of window and register issues), then the executive needs
 *  to know that the return address is to the trap rather than the instruction
 *  following the trap.
 *
 *  So vectors 0 through 255 are treated as regular asynchronous traps which
 *  provide the "correct" return address.  Vectors 256 through 511 are assumed
 *  by the executive to be synchronous and to require that the return address
 *  be fudged.
 *
 *  If you use this mechanism to install a trap handler which must re-execute
 *  the instruction which caused the trap, then it should be installed as
 *  an asynchronous trap.  This will avoid the executive changing the return
 *  address.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t             vector,
  CPU_ISR_raw_handler  new_handler,
  CPU_ISR_raw_handler *old_handler
)
{
  uint32_t               real_vector;
  CPU_Trap_table_entry  *tbr;
  CPU_Trap_table_entry  *slot;
  uint32_t               u32_tbr;
  uint32_t               u32_handler;

  /*
   *  Get the "real" trap number for this vector ignoring the synchronous
   *  versus asynchronous indicator included with our vector numbers.
   */

  real_vector = SPARC_REAL_TRAP_NUMBER( vector );

  /*
   *  Get the current base address of the trap table and calculate a pointer
   *  to the slot we are interested in.
   */

  sparc_get_tbr( u32_tbr );

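  /* The TBR's trap base address field occupies bits 31..12; the low bits
     hold the trap type of the most recent trap, so they are masked off
     before the value is used as the table base. */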
  u32_tbr &= 0xfffff000;

  tbr = (CPU_Trap_table_entry *) u32_tbr;

  slot = &tbr[ real_vector ];

  /*
   *  Get the address of the old_handler from the trap table.
   *
   *  NOTE: The old_handler returned will be bogus if it does not follow
   *        the RTEMS model.
   */

#define HIGH_BITS_MASK   0xFFFFFC00
#define HIGH_BITS_SHIFT  10
#define LOW_BITS_MASK    0x000003FF

  if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) {
    u32_handler =
      (slot->sethi_of_handler_to_l4 << HIGH_BITS_SHIFT) |
      (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK);
    *old_handler = (CPU_ISR_raw_handler) u32_handler;
  } else {
    *old_handler = 0;
  }

  /*
   *  Copy the template to the slot and then fix it.
   */

  *slot = _CPU_Trap_slot_template;

  u32_handler = (uint32_t) new_handler;

  slot->mov_vector_l3 |= vector;
  slot->sethi_of_handler_to_l4 |=
    (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT;
  slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK);

  /*
   * There is no instruction cache snooping, so we need to invalidate
   * the instruction cache to make sure that the processor sees the
   * changes to the trap table.  This step is required on both single-
   * and multiprocessor systems.
   *
   * In an SMP configuration, a change to the trap table might be
   * missed by other cores.  If the system state is up, the other
   * cores can be notified via SMP messages that they need to
   * flush their icache.  If the up state has not been reached,
   * there is no need to notify other cores.  They will do an
   * automatic flush of the icache just after entering the up
   * state, but before enabling interrupts.
   */
  rtems_cache_invalidate_entire_instruction();
}
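
/*
 * Illustration only, not part of the upstream file: the vector numbering
 * convention described above in use.  SPARC_SYNCHRONOUS_TRAP() and
 * SPARC_REAL_TRAP_NUMBER() are assumed to come from <rtems/score/sparc.h>;
 * my_trap_handler and the trap type 0x83 are hypothetical.
 */
#if 0
static void _Sketch_install_software_trap( void )
{
  CPU_ISR_raw_handler previous;

  /*
   * Passing the synchronous form of the vector tells the executive to
   * fudge the return address past the trapping instruction, while
   * SPARC_REAL_TRAP_NUMBER() recovers the trap type 0x83 to index the
   * trap table.
   */
  _CPU_ISR_install_raw_handler(
    SPARC_SYNCHRONOUS_TRAP( 0x83 ),
    my_trap_handler,
    &previous
  );
}
#endif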

void _CPU_ISR_install_vector(
  uint32_t         vector,
  CPU_ISR_handler  new_handler,
  CPU_ISR_handler *old_handler
)
{
  uint32_t            real_vector;
  CPU_ISR_raw_handler ignored;

  /*
   *  Get the "real" trap number for this vector ignoring the synchronous
   *  versus asynchronous indicator included with our vector numbers.
   */

  real_vector = SPARC_REAL_TRAP_NUMBER( vector );

  /*
   *  Return the previous ISR handler.
   */

  *old_handler = _ISR_Vector_table[ real_vector ];

  /*
   *  Install the wrapper so this ISR can be invoked properly.
   */

  _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );

  /*
   *  We put the actual user ISR address in _ISR_Vector_table.  This will
   *  be used by _ISR_Handler so that the user gets control.
   */

  _ISR_Vector_table[ real_vector ] = new_handler;
}

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  uint32_t         *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
)
{
  uint32_t  stack_high;   /* highest "stack aligned" address */
  uint32_t  tmp_psr;

  /*
   *  On CPUs whose stacks grow down, such as the SPARC, we build the
   *  stack based on the stack_high address.
   */

  stack_high = ((uint32_t)(stack_base) + size);
  stack_high &= ~(CPU_STACK_ALIGNMENT - 1);

  /*
   *  See the README in this directory for a diagram of the stack.
   */

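  /* The SPARC call/return convention returns to %o7 + 8, so seeding o7
     with entry_point - 8 makes the first "return" from the context
     switch land exactly on the entry point. */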
  the_context->o7    = ((uint32_t) entry_point) - 8;
  the_context->o6_sp = stack_high - SPARC_MINIMUM_STACK_FRAME_SIZE;
  the_context->i6_fp = 0;

  /*
   *  Build the PSR for the task.  Almost everything can be 0 and the
   *  CWP is corrected during the context switch.
   *
   *  The EF bit determines if the floating point unit is available.
   *  The FPU is ONLY enabled if the context is associated with an FP task
   *  and this SPARC model has an FPU.
   */

  sparc_get_psr( tmp_psr );
  tmp_psr &= ~SPARC_PSR_PIL_MASK;
  tmp_psr |= (new_level << 8) & SPARC_PSR_PIL_MASK;
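  /* The PIL field occupies bits 8..11 of the PSR, hence the shift by 8;
     e.g. new_level == 15 yields the PIL bits 0xF00. */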
  tmp_psr &= ~SPARC_PSR_EF_MASK;      /* disabled by default */

  /* _CPU_Context_restore_heir() relies on this */
  _Assert( ( tmp_psr & SPARC_PSR_ET_MASK ) != 0 );

#if (SPARC_HAS_FPU == 1)
  /*
   *  If this bit is not set, then a task gets a fault when it accesses
   *  a floating point register.  This is a nice way to detect floating
   *  point tasks which are not currently declared as such.
   */

  if ( is_fp )
    tmp_psr |= SPARC_PSR_EF_MASK;
#endif
  the_context->psr = tmp_psr;

  /*
   *  Since THIS thread is being created, there is no way that THIS
   *  thread can have an interrupt stack frame on its stack.
   */
  the_context->isr_dispatch_disable = 0;

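  /* In the SPARC ABI, %g7 is the thread pointer register, so the address
     of the thread control block (TCB) is published there for TLS access. */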
  if ( tls_area != NULL ) {
    void *tcb = _TLS_TCB_after_TLS_block_initialize( tls_area );

    the_context->g7 = (uintptr_t) tcb;
  }
}