/**
 * @file
 *
 * @brief SPARC CPU Dependent Source
 */

/*
 * COPYRIGHT (c) 1989-2007.
 * On-Line Applications Research Corporation (OAR).
 *
 * Copyright (c) 2017 embedded brains GmbH
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/system.h>
#include <rtems/score/isr.h>
#include <rtems/score/percpu.h>
#include <rtems/score/tls.h>
#include <rtems/score/thread.h>
#include <rtems/rtems/cache.h>

#if SPARC_HAS_FPU == 1
RTEMS_STATIC_ASSERT(
  offsetof( Per_CPU_Control, cpu_per_cpu.fsr)
    == SPARC_PER_CPU_FSR_OFFSET,
  SPARC_PER_CPU_FSR_OFFSET
);

#if defined(SPARC_USE_LAZY_FP_SWITCH)
RTEMS_STATIC_ASSERT(
  offsetof( Per_CPU_Control, cpu_per_cpu.fp_owner)
    == SPARC_PER_CPU_FP_OWNER_OFFSET,
  SPARC_PER_CPU_FP_OWNER_OFFSET
);
#endif
#endif

#define SPARC_ASSERT_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(Context_Control, field) == off ## _OFFSET, \
    Context_Control_offset_ ## field \
  )

SPARC_ASSERT_OFFSET(g5, G5);
SPARC_ASSERT_OFFSET(g7, G7);

RTEMS_STATIC_ASSERT(
  offsetof(Context_Control, l0_and_l1) == L0_OFFSET,
  Context_Control_offset_L0
);

RTEMS_STATIC_ASSERT(
  offsetof(Context_Control, l0_and_l1) + 4 == L1_OFFSET,
  Context_Control_offset_L1
);

SPARC_ASSERT_OFFSET(l2, L2);
SPARC_ASSERT_OFFSET(l3, L3);
SPARC_ASSERT_OFFSET(l4, L4);
SPARC_ASSERT_OFFSET(l5, L5);
SPARC_ASSERT_OFFSET(l6, L6);
SPARC_ASSERT_OFFSET(l7, L7);
SPARC_ASSERT_OFFSET(i0, I0);
SPARC_ASSERT_OFFSET(i1, I1);
SPARC_ASSERT_OFFSET(i2, I2);
SPARC_ASSERT_OFFSET(i3, I3);
SPARC_ASSERT_OFFSET(i4, I4);
SPARC_ASSERT_OFFSET(i5, I5);
SPARC_ASSERT_OFFSET(i6_fp, I6_FP);
SPARC_ASSERT_OFFSET(i7, I7);
SPARC_ASSERT_OFFSET(o6_sp, O6_SP);
SPARC_ASSERT_OFFSET(o7, O7);
SPARC_ASSERT_OFFSET(psr, PSR);
SPARC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE_STACK);

#if defined(RTEMS_SMP)
SPARC_ASSERT_OFFSET(is_executing, SPARC_CONTEXT_CONTROL_IS_EXECUTING);
#endif

#define SPARC_ASSERT_ISF_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(CPU_Interrupt_frame, field) == ISF_ ## off ## _OFFSET, \
    CPU_Interrupt_frame_offset_ ## field \
  )

SPARC_ASSERT_ISF_OFFSET(psr, PSR);
SPARC_ASSERT_ISF_OFFSET(pc, PC);
SPARC_ASSERT_ISF_OFFSET(npc, NPC);
SPARC_ASSERT_ISF_OFFSET(g1, G1);
SPARC_ASSERT_ISF_OFFSET(g2, G2);
SPARC_ASSERT_ISF_OFFSET(g3, G3);
SPARC_ASSERT_ISF_OFFSET(g4, G4);
SPARC_ASSERT_ISF_OFFSET(g5, G5);
SPARC_ASSERT_ISF_OFFSET(g7, G7);
SPARC_ASSERT_ISF_OFFSET(i0, I0);
SPARC_ASSERT_ISF_OFFSET(i1, I1);
SPARC_ASSERT_ISF_OFFSET(i2, I2);
SPARC_ASSERT_ISF_OFFSET(i3, I3);
SPARC_ASSERT_ISF_OFFSET(i4, I4);
SPARC_ASSERT_ISF_OFFSET(i5, I5);
SPARC_ASSERT_ISF_OFFSET(i6_fp, I6_FP);
SPARC_ASSERT_ISF_OFFSET(i7, I7);
SPARC_ASSERT_ISF_OFFSET(y, Y);
SPARC_ASSERT_ISF_OFFSET(tpc, TPC);

#define SPARC_ASSERT_FP_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(Context_Control_fp, field) == SPARC_FP_CONTEXT_OFFSET_ ## off, \
    Context_Control_fp_offset_ ## field \
  )

SPARC_ASSERT_FP_OFFSET(f0_f1, F0_F1);
SPARC_ASSERT_FP_OFFSET(f2_f3, F2_F3);
SPARC_ASSERT_FP_OFFSET(f4_f5, F4_F5);
SPARC_ASSERT_FP_OFFSET(f6_f7, F6_F7);
SPARC_ASSERT_FP_OFFSET(f8_f9, F8_F9);
SPARC_ASSERT_FP_OFFSET(f10_f11, F10_F11);
SPARC_ASSERT_FP_OFFSET(f12_f13, F12_F13);
SPARC_ASSERT_FP_OFFSET(f14_f15, F14_F15);
SPARC_ASSERT_FP_OFFSET(f16_f17, F16_F17);
SPARC_ASSERT_FP_OFFSET(f18_f19, F18_F19);
SPARC_ASSERT_FP_OFFSET(f20_f21, F20_F21);
SPARC_ASSERT_FP_OFFSET(f22_f23, F22_F23);
SPARC_ASSERT_FP_OFFSET(f24_f25, F24_F25);
SPARC_ASSERT_FP_OFFSET(f26_f27, F26_F27);
SPARC_ASSERT_FP_OFFSET(f28_f29, F28_F29);
SPARC_ASSERT_FP_OFFSET(f30_f31, F30_F31);
SPARC_ASSERT_FP_OFFSET(fsr, FSR);

RTEMS_STATIC_ASSERT(
  sizeof(SPARC_Minimum_stack_frame) == SPARC_MINIMUM_STACK_FRAME_SIZE,
  SPARC_MINIMUM_STACK_FRAME_SIZE
);

/* https://devel.rtems.org/ticket/2352 */
RTEMS_STATIC_ASSERT(
  sizeof(CPU_Interrupt_frame) % CPU_ALIGNMENT == 0,
  CPU_Interrupt_frame_alignment
);

/*
 * This initializes the set of opcodes placed in each trap
 * table entry. The routine which installs a handler is responsible
 * for filling in the fields for the _handler address and the _vector
 * trap type.
 *
 * The constants following this structure are masks for the fields which
 * must be filled in when the handler is installed.
 */
const CPU_Trap_table_entry _CPU_Trap_slot_template = {
  0xa1480000,  /* mov   %psr, %l0           */
  0x29000000,  /* sethi %hi(_handler), %l4  */
  0x81c52000,  /* jmp   %l4 + %lo(_handler) */
  0xa6102000   /* mov   _vector, %l3        */
};
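
/*
 * Illustrative example (the address and vector below are made up): if a
 * handler lived at 0x40001234 and were installed for vector 0x11,
 * _CPU_ISR_install_raw_handler() would patch the template roughly as:
 *
 *   sethi: 0x29000000 | (0x40001234 >> 10)   = 0x29100004
 *   jmp:   0x81c52000 | (0x40001234 & 0x3ff) = 0x81c52234
 *   mov:   0xa6102000 | 0x11                 = 0xa6102011
 */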

/*
 * _CPU_Initialize
 *
 * This routine performs processor dependent initialization.
 *
 * Input Parameters: NONE
 *
 * Output Parameters: NONE
 *
 * NOTE: There is no need to save the pointer to the thread dispatch routine.
 *       The SPARC's assembly code can reference it directly with no problems.
 */

void _CPU_Initialize(void)
{
#if defined(SPARC_USE_LAZY_FP_SWITCH)
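  /*
   * The inline assembly below emits no instructions.  It only defines two
   * global assembler symbols whose values are the offsets of the fp_context
   * members, so the lazy FP switch assembly code can use them without
   * duplicating the C structure layout.
   */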
  __asm__ volatile (
    ".global SPARC_THREAD_CONTROL_REGISTERS_FP_CONTEXT_OFFSET\n"
    ".set SPARC_THREAD_CONTROL_REGISTERS_FP_CONTEXT_OFFSET, %0\n"
    ".global SPARC_THREAD_CONTROL_FP_CONTEXT_OFFSET\n"
    ".set SPARC_THREAD_CONTROL_FP_CONTEXT_OFFSET, %1\n"
    :
    : "i" (offsetof(Thread_Control, Registers.fp_context)),
      "i" (offsetof(Thread_Control, fp_context))
  );
#endif
}

uint32_t _CPU_ISR_Get_level( void )
{
  uint32_t level;

  sparc_get_interrupt_level( level );

  return level;
}

/*
 * _CPU_ISR_install_raw_handler
 *
 * This routine installs the specified handler as a "raw" non-executive
 * supported trap handler (a.k.a. interrupt service routine).
 *
 * Input Parameters:
 *   vector      - trap table entry number plus synchronous
 *                 vs. asynchronous information
 *   new_handler - address of the handler to be installed
 *   old_handler - pointer to an address of the handler previously installed
 *
 * Output Parameters:
 *   *old_handler - address of the handler previously installed
 *
 * NOTE:
 *
 * On the SPARC, there are really only 256 vectors. However, the executive
 * has no easy, fast, reliable way to determine which traps are synchronous
 * and which are asynchronous. By default, synchronous traps return to the
 * instruction which caused the interrupt. So if you install a software
 * trap handler as an executive interrupt handler (which is desirable since
 * RTEMS takes care of window and register issues), then the executive needs
 * to know that the return address is to the trap rather than the instruction
 * following the trap.
 *
 * So vectors 0 through 255 are treated as regular asynchronous traps which
 * provide the "correct" return address. Vectors 256 through 511 are assumed
 * by the executive to be synchronous and to require that the return address
 * be fudged.
 *
 * If you use this mechanism to install a trap handler which must reexecute
 * the instruction which caused the trap, then it should be installed as
 * an asynchronous trap. This will avoid the executive changing the return
 * address.
 */

void _CPU_ISR_install_raw_handler(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
)
{
  uint32_t               real_vector;
  CPU_Trap_table_entry  *tbr;
  CPU_Trap_table_entry  *slot;
  uint32_t               u32_tbr;
  uint32_t               u32_handler;

  /*
   * Get the "real" trap number for this vector ignoring the synchronous
   * versus asynchronous indicator included with our vector numbers.
   */

  real_vector = SPARC_REAL_TRAP_NUMBER( vector );

  /*
   * Get the current base address of the trap table and calculate a pointer
   * to the slot we are interested in.
   */

  sparc_get_tbr( u32_tbr );

  u32_tbr &= 0xfffff000;

  tbr = (CPU_Trap_table_entry *) u32_tbr;

  slot = &tbr[ real_vector ];

  /*
   * Get the address of the old_handler from the trap table.
   *
   * NOTE: The old_handler returned will be bogus if it does not follow
   *       the RTEMS model.
   */

#define HIGH_BITS_MASK   0xFFFFFC00
#define HIGH_BITS_SHIFT  10
#define LOW_BITS_MASK    0x000003FF
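
  /*
   * These masks mirror the sethi/jmp encoding used in the trap slot
   * template: the upper 22 bits of the handler address go into the sethi
   * immediate and the lower 10 bits into the jmp displacement.
   */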

  if ( slot->mov_psr_l0 == _CPU_Trap_slot_template.mov_psr_l0 ) {
    u32_handler =
      (slot->sethi_of_handler_to_l4 << HIGH_BITS_SHIFT) |
      (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK);
    *old_handler = (proc_ptr) u32_handler;
  } else
    *old_handler = 0;

  /*
   * Copy the template to the slot and then fix it.
   */

  *slot = _CPU_Trap_slot_template;

  u32_handler = (uint32_t) new_handler;

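  /*
   * Note that the immediate patched into the mov is the caller supplied
   * vector, not real_vector, so any synchronous trap indication encoded
   * above the real trap number is preserved in the value handed to the
   * trap handler in %l3.
   */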
  slot->mov_vector_l3 |= vector;
  slot->sethi_of_handler_to_l4 |=
    (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT;
  slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK);

  /*
   * There is no instruction cache snooping, so we need to invalidate
   * the instruction cache to make sure that the processor sees the
   * changes to the trap table. This step is required on both single-
   * and multiprocessor systems.
   *
   * In an SMP configuration, a change to the trap table might be
   * missed by other cores. If the system state is up, the other
   * cores can be notified using SMP messages that they need to
   * flush their icache. If the up state has not been reached
   * there is no need to notify other cores. They will do an
   * automatic flush of the icache just after entering the up
   * state, but before enabling interrupts.
   */
  rtems_cache_invalidate_entire_instruction();
}

void _CPU_ISR_install_vector(
  uint32_t    vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
)
{
  uint32_t   real_vector;
  proc_ptr   ignored;

  /*
   * Get the "real" trap number for this vector ignoring the synchronous
   * versus asynchronous indicator included with our vector numbers.
   */

  real_vector = SPARC_REAL_TRAP_NUMBER( vector );

  /*
   * Return the previous ISR handler.
   */

  *old_handler = _ISR_Vector_table[ real_vector ];

  /*
   * Install the wrapper so this ISR can be invoked properly.
   */

  _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );

  /*
   * We put the actual user ISR address in _ISR_Vector_table. This will
   * be used by the _ISR_Handler so the user gets control.
   */

  _ISR_Vector_table[ real_vector ] = new_handler;
}

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  uint32_t         *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
)
{
  uint32_t     stack_high;  /* highest "stack aligned" address */
  uint32_t     tmp_psr;

  /*
   * On CPUs with stacks which grow down (i.e. SPARC), we build the stack
   * based on the stack_high address.
   */

  stack_high = ((uint32_t)(stack_base) + size);
  stack_high &= ~(CPU_STACK_ALIGNMENT - 1);
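  /*
   * For example, with a CPU_STACK_ALIGNMENT of 16 and stack_base + size
   * equal to 0x4000abcd, stack_high would become 0x4000abc0 (the numbers
   * are made up for illustration).
   */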

  /*
   * See the README in this directory for a diagram of the stack.
   */

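  /*
   * The return address is set to entry_point - 8 because the context switch
   * code returns with a jmp %o7 + 8 style return, which then lands exactly
   * on entry_point; the initial stack pointer leaves room for one minimum
   * stack frame below stack_high.
   */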
  the_context->o7    = ((uint32_t) entry_point) - 8;
  the_context->o6_sp = stack_high - SPARC_MINIMUM_STACK_FRAME_SIZE;
  the_context->i6_fp = 0;

  /*
   * Build the PSR for the task. Most fields can be 0 and the
   * CWP is corrected during the context switch.
   *
   * The EF bit determines if the floating point unit is available.
   * The FPU is ONLY enabled if the context is associated with an FP task
   * and this SPARC model has an FPU.
   */

  sparc_get_psr( tmp_psr );
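  /*
   * The processor interrupt level (PIL) occupies PSR bits 8 to 11, so the
   * requested interrupt level is shifted into that field.
   */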
  tmp_psr &= ~SPARC_PSR_PIL_MASK;
  tmp_psr |= (new_level << 8) & SPARC_PSR_PIL_MASK;
  tmp_psr &= ~SPARC_PSR_EF_MASK;  /* disabled by default */

  /* _CPU_Context_restore_heir() relies on this */
  _Assert( ( tmp_psr & SPARC_PSR_ET_MASK ) != 0 );

#if (SPARC_HAS_FPU == 1)
  /*
   * If this bit is not set, then a task gets a fault when it accesses
   * a floating point register. This is a nice way to detect floating
   * point tasks which are not currently declared as such.
   */

  if ( is_fp )
    tmp_psr |= SPARC_PSR_EF_MASK;
#endif
  the_context->psr = tmp_psr;

  /*
   * Since THIS thread is being created, there is no way that THIS
   * thread can have an interrupt stack frame on its stack.
   */
  the_context->isr_dispatch_disable = 0;

  if ( tls_area != NULL ) {
    void *tcb = _TLS_TCB_after_TLS_block_initialize( tls_area );

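    /*
     * The SPARC TLS ABI uses %g7 as the thread pointer, so the address of
     * the thread control block is placed in the saved g7 slot.
     */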
    the_context->g7 = (uintptr_t) tcb;
  }
}