1 | /** |
---|
2 | * @file |
---|
3 | * |
---|
4 | * This include file defines the per CPU information required |
---|
5 | * by RTEMS. |
---|
6 | */ |
---|
7 | |
---|
8 | /* |
---|
9 | * COPYRIGHT (c) 1989-2011. |
---|
10 | * On-Line Applications Research Corporation (OAR). |
---|
11 | * |
---|
12 | * Copyright (c) 2012, 2018 embedded brains GmbH |
---|
13 | * |
---|
14 | * The license and distribution terms for this file may be |
---|
15 | * found in the file LICENSE in this distribution or at |
---|
16 | * http://www.rtems.org/license/LICENSE. |
---|
17 | */ |
---|
18 | |
---|
19 | #ifndef _RTEMS_PERCPU_H |
---|
20 | #define _RTEMS_PERCPU_H |
---|
21 | |
---|
22 | #include <rtems/score/cpuimpl.h> |
---|
23 | |
---|
24 | #if defined( ASM ) |
---|
25 | #include <rtems/asm.h> |
---|
26 | #else |
---|
27 | #include <rtems/score/assert.h> |
---|
28 | #include <rtems/score/chain.h> |
---|
29 | #include <rtems/score/isrlock.h> |
---|
30 | #include <rtems/score/smp.h> |
---|
31 | #include <rtems/score/timestamp.h> |
---|
32 | #include <rtems/score/watchdog.h> |
---|
33 | #endif |
---|
34 | |
---|
35 | #ifdef __cplusplus |
---|
36 | extern "C" { |
---|
37 | #endif |
---|
38 | |
---|
#if defined(RTEMS_SMP)

/*
 * Approximate size of one Per_CPU_Control instance.  It is only used to
 * select the power-of-two envelope size below.  The estimate is larger for
 * profiling and debug configurations since they add members to the
 * structure (see Per_CPU_Stats and the RTEMS_DEBUG assertions).
 */
#if defined(RTEMS_PROFILING)
#define PER_CPU_CONTROL_SIZE_APPROX ( 512 + CPU_INTERRUPT_FRAME_SIZE )
#elif defined(RTEMS_DEBUG) || CPU_SIZEOF_POINTER > 4
#define PER_CPU_CONTROL_SIZE_APPROX ( 256 + CPU_INTERRUPT_FRAME_SIZE )
#else
#define PER_CPU_CONTROL_SIZE_APPROX ( 128 + CPU_INTERRUPT_FRAME_SIZE )
#endif

/*
 * This ensures that on SMP configurations the individual per-CPU controls
 * are on different cache lines to prevent false sharing.  This define can be
 * used in assembler code to easily get the per-CPU control for a particular
 * processor.
 */
#if PER_CPU_CONTROL_SIZE_APPROX > 1024
#define PER_CPU_CONTROL_SIZE_LOG2 11
#elif PER_CPU_CONTROL_SIZE_APPROX > 512
#define PER_CPU_CONTROL_SIZE_LOG2 10
#elif PER_CPU_CONTROL_SIZE_APPROX > 256
#define PER_CPU_CONTROL_SIZE_LOG2 9
#elif PER_CPU_CONTROL_SIZE_APPROX > 128
#define PER_CPU_CONTROL_SIZE_LOG2 8
#else
#define PER_CPU_CONTROL_SIZE_LOG2 7
#endif

/*
 * The actual envelope size: the next power of two at or above the
 * approximation, see Per_CPU_Control_envelope below.
 */
#define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
#endif
---|
68 | |
---|
69 | #if !defined( ASM ) |
---|
70 | |
---|
71 | struct Record_Control; |
---|
72 | |
---|
73 | struct _Thread_Control; |
---|
74 | |
---|
75 | struct Scheduler_Context; |
---|
76 | |
---|
77 | /** |
---|
78 | * @defgroup PerCPU RTEMS Per CPU Information |
---|
79 | * |
---|
80 | * @ingroup RTEMSScore |
---|
81 | * |
---|
82 | * This defines the per CPU state information required by RTEMS |
---|
83 | * and the BSP. In an SMP configuration, there will be multiple |
---|
84 | * instances of this data structure -- one per CPU -- and the |
---|
85 | * current CPU number will be used as the index. |
---|
86 | */ |
---|
87 | |
---|
88 | /**@{*/ |
---|
89 | |
---|
90 | #if defined( RTEMS_SMP ) |
---|
91 | |
---|
92 | /** |
---|
93 | * @brief State of a processor. |
---|
94 | * |
---|
95 | * The processor state controls the life cycle of processors at the lowest |
---|
96 | * level. No multi-threading or other high-level concepts matter here. |
---|
97 | * |
---|
98 | * State changes must be initiated via _Per_CPU_State_change(). This function |
---|
99 | * may not return in case someone requested a shutdown. The |
---|
100 | * _SMP_Send_message() function will be used to notify other processors about |
---|
101 | * state changes if the other processor is in the up state. |
---|
102 | * |
---|
103 | * Due to the sequential nature of the basic system initialization one |
---|
104 | * processor has a special role. It is the processor executing the boot_card() |
---|
105 | * function. This processor is called the boot processor. All other |
---|
106 | * processors are called secondary. |
---|
107 | * |
---|
108 | * @dot |
---|
109 | * digraph states { |
---|
110 | * i [label="PER_CPU_STATE_INITIAL"]; |
---|
111 | * rdy [label="PER_CPU_STATE_READY_TO_START_MULTITASKING"]; |
---|
112 | * reqsm [label="PER_CPU_STATE_REQUEST_START_MULTITASKING"]; |
---|
113 | * u [label="PER_CPU_STATE_UP"]; |
---|
114 | * s [label="PER_CPU_STATE_SHUTDOWN"]; |
---|
115 | * i -> rdy [label="processor\ncompleted initialization"]; |
---|
116 | * rdy -> reqsm [label="boot processor\ncompleted initialization"]; |
---|
117 | * reqsm -> u [label="processor\nstarts multitasking"]; |
---|
118 | * i -> s; |
---|
119 | * rdy -> s; |
---|
120 | * reqsm -> s; |
---|
121 | * u -> s; |
---|
122 | * } |
---|
123 | * @enddot |
---|
124 | */ |
---|
typedef enum {
  /**
   * @brief The per CPU controls are initialized to zero.
   *
   * The boot processor executes the sequential boot code in this state.  The
   * secondary processors should perform their basic initialization now and
   * change into the PER_CPU_STATE_READY_TO_START_MULTITASKING state once this
   * is complete.
   */
  PER_CPU_STATE_INITIAL,

  /**
   * @brief Processor is ready to start multitasking.
   *
   * The secondary processor performed its basic initialization and is ready to
   * receive inter-processor interrupts.  Interrupt delivery must be disabled
   * in this state, but requested inter-processor interrupts must be recorded
   * and must be delivered once the secondary processor enables interrupts for
   * the first time.  The boot processor will wait for all secondary processors
   * to change into this state.  In case a secondary processor does not reach
   * this state the system will not start.  The secondary processors wait now
   * for a change into the PER_CPU_STATE_REQUEST_START_MULTITASKING state set
   * by the boot processor once all secondary processors reached the
   * PER_CPU_STATE_READY_TO_START_MULTITASKING state.
   */
  PER_CPU_STATE_READY_TO_START_MULTITASKING,

  /**
   * @brief Multitasking start of processor is requested.
   *
   * The boot processor completed system initialization and is about to perform
   * a context switch to its heir thread.  Secondary processors should now
   * issue a context switch to the heir thread.  This normally enables
   * interrupts on the processor for the first time.
   */
  PER_CPU_STATE_REQUEST_START_MULTITASKING,

  /**
   * @brief Normal multitasking state.
   */
  PER_CPU_STATE_UP,

  /**
   * @brief This is the terminal state.
   */
  PER_CPU_STATE_SHUTDOWN
} Per_CPU_State;
---|
172 | |
---|
173 | #endif /* defined( RTEMS_SMP ) */ |
---|
174 | |
---|
175 | /** |
---|
176 | * @brief Per-CPU statistics. |
---|
177 | */ |
---|
typedef struct {
#if defined( RTEMS_PROFILING )
  /**
   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
   *
   * This value is used to measure the time of disabled thread dispatching.
   */
  CPU_Counter_ticks thread_dispatch_disabled_instant;

  /**
   * @brief The maximum time of disabled thread dispatching in CPU counter
   * ticks.
   */
  CPU_Counter_ticks max_thread_dispatch_disabled_time;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in CPU counter ticks.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.
   */
  CPU_Counter_ticks max_interrupt_time;

  /**
   * @brief The maximum interrupt delay in CPU counter ticks if supported by
   * the hardware.
   */
  CPU_Counter_ticks max_interrupt_delay;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in CPU counter ticks.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in CPU counter ticks.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
#endif /* defined( RTEMS_PROFILING ) */
} Per_CPU_Stats;
---|
246 | |
---|
247 | /** |
---|
248 | * @brief Per-CPU watchdog header index. |
---|
249 | */ |
---|
typedef enum {
  /**
   * @brief Index for tick clock per-CPU watchdog header.
   *
   * The reference time point for the tick clock is the system start.  The
   * clock resolution is one system clock tick.  It is used for the system
   * clock tick based time services.
   */
  PER_CPU_WATCHDOG_TICKS,

  /**
   * @brief Index for realtime clock per-CPU watchdog header.
   *
   * The reference time point for the realtime clock is the POSIX Epoch.  The
   * clock resolution is one nanosecond.  It is used for the time of day
   * services and the POSIX services using CLOCK_REALTIME.
   */
  PER_CPU_WATCHDOG_REALTIME,

  /**
   * @brief Index for monotonic clock per-CPU watchdog header.
   *
   * The reference time point for the monotonic clock is the system start.  The
   * clock resolution is one nanosecond.  It is used for the POSIX services
   * using CLOCK_MONOTONIC.
   */
  PER_CPU_WATCHDOG_MONOTONIC,

  /**
   * @brief Count of per-CPU watchdog headers.
   */
  PER_CPU_WATCHDOG_COUNT
} Per_CPU_Watchdog_index;
---|
283 | |
---|
284 | /** |
---|
285 | * @brief Per CPU Core Structure |
---|
286 | * |
---|
287 | * This structure is used to hold per core state information. |
---|
288 | */ |
---|
typedef struct Per_CPU_Control {
#if CPU_PER_CPU_CONTROL_SIZE > 0
  /**
   * @brief CPU port specific control.
   */
  CPU_Per_CPU_control cpu_per_cpu;
#endif

  /**
   * @brief The interrupt stack low address for this processor.
   */
  void *interrupt_stack_low;

  /**
   * @brief The interrupt stack high address for this processor.
   */
  void *interrupt_stack_high;

  /**
   * This contains the current interrupt nesting level on this
   * CPU.
   */
  uint32_t isr_nest_level;

  /**
   * @brief Indicates if an ISR thread dispatch is disabled.
   *
   * This flag is context switched with each thread.  It indicates that this
   * thread has an interrupt stack frame on its stack.  By using this flag, we
   * can avoid nesting more interrupt dispatching attempts on a previously
   * interrupted thread's stack.
   */
  uint32_t isr_dispatch_disable;

  /**
   * @brief The thread dispatch critical section nesting counter which is used
   * to prevent context switches at inopportune moments.
   */
  volatile uint32_t thread_dispatch_disable_level;

  /**
   * @brief This is set to true when this processor needs to run the thread
   * dispatcher.
   *
   * It is volatile since interrupts may alter this flag.
   *
   * This member is not protected by a lock and must be accessed only by this
   * processor.  Code (e.g. scheduler and post-switch action requests) running
   * on another processors must use an inter-processor interrupt to set the
   * thread dispatch necessary indicator to true.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  volatile bool dispatch_necessary;

  /*
   * Ensure that the executing member is at least 4-byte aligned, see
   * PER_CPU_OFFSET_EXECUTING.  This is necessary on CPU ports with relaxed
   * alignment restrictions, e.g. type alignment is less than the type size.
   */
  bool reserved_for_executing_alignment[ 3 ];

  /**
   * @brief This is the thread executing on this processor.
   *
   * This member is not protected by a lock.  The only writer is this
   * processor.
   *
   * On SMP configurations a thread may be registered as executing on more than
   * one processor in case a thread migration is in progress.  On SMP
   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
   * a thread context is executing on a processor.
   */
  struct _Thread_Control *executing;

  /**
   * @brief This is the heir thread for this processor.
   *
   * This member is not protected by a lock.  The only writer after
   * multitasking start is the scheduler owning this processor.  It is assumed
   * that stores to pointers are atomic on all supported SMP architectures.
   * The CPU port specific code (inter-processor interrupt handling and
   * _CPU_SMP_Send_interrupt()) must guarantee that this processor observes the
   * last value written.
   *
   * A thread can be a heir on at most one processor in the system.
   *
   * @see _Thread_Get_heir_and_make_it_executing().
   */
  struct _Thread_Control *heir;

#if defined(RTEMS_SMP)
  /**
   * @brief CPU port specific interrupt frame storage for this processor.
   *
   * Its size contributes to PER_CPU_CONTROL_SIZE_APPROX via
   * CPU_INTERRUPT_FRAME_SIZE and its offset is exported to assembler code as
   * PER_CPU_INTERRUPT_FRAME_AREA.  NOTE(review): the exact usage is defined
   * by the CPU port — confirm against the port's interrupt epilogue code.
   */
  CPU_Interrupt_frame Interrupt_frame;
#endif

  /**
   * @brief The CPU usage timestamp contains the time point of the last heir
   * thread change or last CPU usage update of the executing thread of this
   * processor.
   *
   * Protected by the scheduler lock.
   *
   * @see _Scheduler_Update_heir(), _Thread_Dispatch_update_heir() and
   * _Thread_Get_CPU_time_used().
   */
  Timestamp_Control cpu_usage_timestamp;

  /**
   * @brief Watchdog state for this processor.
   */
  struct {
    /**
     * @brief Protects all watchdog operations on this processor.
     */
    ISR_LOCK_MEMBER( Lock )

    /**
     * @brief Watchdog ticks on this processor used for monotonic clock
     * watchdogs.
     */
    uint64_t ticks;

    /**
     * @brief Header for watchdogs.
     *
     * @see Per_CPU_Watchdog_index.
     */
    Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
  } Watchdog;

#if defined( RTEMS_SMP )
  /**
   * @brief This lock protects some members of this structure.
   */
  ISR_lock_Control Lock;

  /**
   * @brief Lock context used to acquire all per-CPU locks.
   *
   * This member is protected by the Per_CPU_Control::Lock lock.
   *
   * @see _Per_CPU_Acquire_all().
   */
  ISR_lock_Context Lock_context;

  /**
   * @brief Chain of threads in need for help.
   *
   * This member is protected by the Per_CPU_Control::Lock lock.
   */
  Chain_Control Threads_in_need_for_help;

  /**
   * @brief Bit field for SMP messages.
   *
   * This member is not protected by locks.  Atomic operations are used to set
   * and get the message bits.
   */
  Atomic_Ulong message;

  /**
   * @brief Members describing the scheduler instance owning this processor.
   */
  struct {
    /**
     * @brief The scheduler control of the scheduler owning this processor.
     *
     * This pointer is NULL in case this processor is currently not used by a
     * scheduler instance.
     */
    const struct _Scheduler_Control *control;

    /**
     * @brief The scheduler context of the scheduler owning this processor.
     *
     * This pointer is NULL in case this processor is currently not used by a
     * scheduler instance.
     */
    const struct Scheduler_Context *context;

    /**
     * @brief The idle thread for this processor in case it is online and
     * currently not used by a scheduler instance.
     */
    struct _Thread_Control *idle_if_online_and_unused;
  } Scheduler;

  /**
   * @brief Begin of the per-CPU data area.
   *
   * Contains items defined via PER_CPU_DATA_ITEM().
   */
  char *data;

  /**
   * @brief Indicates the current state of the CPU.
   *
   * This member is protected by the _Per_CPU_State_lock lock.
   *
   * @see _Per_CPU_State_change().
   */
  Per_CPU_State state;

  /**
   * @brief Action to be executed by this processor in the
   * SYSTEM_STATE_BEFORE_MULTITASKING state on behalf of the boot processor.
   *
   * @see _SMP_Before_multitasking_action().
   */
  Atomic_Uintptr before_multitasking_action;

  /**
   * @brief Indicates if the processor has been successfully started via
   * _CPU_SMP_Start_processor().
   */
  bool online;

  /**
   * @brief Indicates if the processor is the one that performed the initial
   * system initialization.
   */
  bool boot;
#endif

  /**
   * @brief Control for the event record support of this processor.
   *
   * NOTE(review): only the forward declaration of struct Record_Control is
   * visible here — confirm ownership and initialization against the record
   * support implementation.
   */
  struct Record_Control *record;

  /**
   * @brief Per-CPU statistics; members are present only in RTEMS_PROFILING
   * configurations, see Per_CPU_Stats.
   */
  Per_CPU_Stats Stats;
} Per_CPU_Control;
---|
514 | |
---|
#if defined( RTEMS_SMP )
/**
 * @brief Pads each per-CPU control up to PER_CPU_CONTROL_SIZE bytes.
 *
 * This places consecutive array elements of _Per_CPU_Information on separate
 * cache lines to prevent false sharing, see PER_CPU_CONTROL_SIZE_LOG2.
 */
typedef struct {
  Per_CPU_Control per_cpu;
  char unused_space_for_cache_line_alignment
    [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
} Per_CPU_Control_envelope;
#else
/**
 * @brief On uniprocessor configurations no padding is required.
 */
typedef struct {
  Per_CPU_Control per_cpu;
} Per_CPU_Control_envelope;
#endif
---|
526 | |
---|
527 | /** |
---|
528 | * @brief Set of Per CPU Core Information |
---|
529 | * |
---|
530 | * This is an array of per CPU core information. |
---|
531 | */ |
---|
extern Per_CPU_Control_envelope _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;

/*
 * Acquires the Per_CPU_Control::Lock of @a cpu.  The caller must have
 * interrupts disabled, see _Per_CPU_Acquire_all().
 */
#define _Per_CPU_Acquire( cpu, lock_context ) \
  _ISR_lock_Acquire( &( cpu )->Lock, lock_context )

/*
 * Releases the Per_CPU_Control::Lock of @a cpu acquired by
 * _Per_CPU_Acquire().
 */
#define _Per_CPU_Release( cpu, lock_context ) \
  _ISR_lock_Release( &( cpu )->Lock, lock_context )

/*
 * If we get the current processor index in a context which allows thread
 * dispatching, then we may already run on another processor right after the
 * read instruction.  There are very few cases in which this makes sense (here
 * we can use _Per_CPU_Get_snapshot()).  All other places must use
 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
 */
#if defined( _CPU_Get_current_per_CPU_control )
#define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
#else
#define _Per_CPU_Get_snapshot() \
  ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
#endif
---|
553 | |
---|
#if defined( RTEMS_SMP )
/**
 * @brief Returns the per-CPU control of the current processor.
 *
 * The assertion checks that either thread dispatching or interrupts are
 * disabled, i.e. that the caller cannot migrate to another processor and end
 * up with a stale pointer (checked only in RTEMS_DEBUG configurations, see
 * the comment at _Per_CPU_Get_snapshot()).
 */
static inline Per_CPU_Control *_Per_CPU_Get( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();

  _Assert(
    cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
  );

  return cpu_self;
}
#else
/* On uniprocessor configurations the snapshot cannot become stale. */
#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
#endif
---|
568 | |
---|
569 | static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index ) |
---|
570 | { |
---|
571 | return &_Per_CPU_Information[ index ].per_cpu; |
---|
572 | } |
---|
573 | |
---|
574 | static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu ) |
---|
575 | { |
---|
576 | const Per_CPU_Control_envelope *per_cpu_envelope = |
---|
577 | ( const Per_CPU_Control_envelope * ) cpu; |
---|
578 | |
---|
579 | return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] ); |
---|
580 | } |
---|
581 | |
---|
/**
 * @brief Returns the thread registered as executing on this processor.
 *
 * The caller is responsible for using a per-CPU control that remains valid,
 * see _Thread_Get_executing() for a wrapper that disables interrupts where
 * necessary.
 */
static inline struct _Thread_Control *_Per_CPU_Get_executing(
  const Per_CPU_Control *cpu
)
{
  return cpu->executing;
}
---|
588 | |
---|
/**
 * @brief Indicates if the processor has been started successfully.
 *
 * @param[in] cpu The per-CPU control (unused on uniprocessor configurations).
 *
 * @retval true The processor is online; always true when RTEMS_SMP is
 *   disabled.
 * @retval false Otherwise.
 */
static inline bool _Per_CPU_Is_processor_online(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->online;
#else
  (void) cpu;

  return true;
#endif
}
---|
601 | |
---|
/**
 * @brief Indicates if the processor performed the initial system
 * initialization.
 *
 * @param[in] cpu The per-CPU control (unused on uniprocessor configurations).
 *
 * @retval true The processor is the boot processor; always true when
 *   RTEMS_SMP is disabled.
 * @retval false Otherwise.
 */
static inline bool _Per_CPU_Is_boot_processor(
  const Per_CPU_Control *cpu
)
{
#if defined( RTEMS_SMP )
  return cpu->boot;
#else
  (void) cpu;

  return true;
#endif
}
---|
614 | |
---|
/**
 * @brief Disables interrupts and acquires the per-CPU locks of all
 * processors.
 *
 * The locks are acquired in ascending processor index order.  Since
 * _Per_CPU_Release_all() releases them in the reverse order, all callers use
 * a consistent lock ordering, which prevents deadlock between concurrent
 * acquire-all operations.
 *
 * The caller-provided lock context can track only one lock, so each lock
 * after the first is acquired with the lock context embedded in the
 * previously acquired per-CPU control (Per_CPU_Control::Lock_context).
 *
 * @param[in,out] lock_context The lock context used for the interrupt
 *   disable and the lock of processor 0; pass the same context to
 *   _Per_CPU_Release_all().
 */
RTEMS_INLINE_ROUTINE void _Per_CPU_Acquire_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t cpu_max;
  uint32_t cpu_index;
  Per_CPU_Control *previous_cpu;

  cpu_max = _SMP_Get_processor_maximum();
  previous_cpu = _Per_CPU_Get_by_index( 0 );

  _ISR_lock_ISR_disable( lock_context );
  _Per_CPU_Acquire( previous_cpu, lock_context );

  for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
    Per_CPU_Control *cpu;

    cpu = _Per_CPU_Get_by_index( cpu_index );
    _Per_CPU_Acquire( cpu, &previous_cpu->Lock_context );
    previous_cpu = cpu;
  }
#else
  /* Uniprocessor configurations only need the interrupt disable. */
  _ISR_lock_ISR_disable( lock_context );
#endif
}
---|
641 | |
---|
/**
 * @brief Releases the per-CPU locks of all processors and enables
 * interrupts.
 *
 * This is the inverse of _Per_CPU_Acquire_all(): the locks are released in
 * descending processor index order, each with the lock context it was
 * acquired with (the Lock_context of the per-CPU control with the next lower
 * index, and finally the caller-provided context for processor 0).
 *
 * @param[in,out] lock_context The lock context previously passed to
 *   _Per_CPU_Acquire_all().
 */
RTEMS_INLINE_ROUTINE void _Per_CPU_Release_all(
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  uint32_t cpu_max;
  uint32_t cpu_index;
  Per_CPU_Control *cpu;

  cpu_max = _SMP_Get_processor_maximum();
  cpu = _Per_CPU_Get_by_index( cpu_max - 1 );

  for ( cpu_index = cpu_max - 1 ; cpu_index > 0 ; --cpu_index ) {
    Per_CPU_Control *previous_cpu;

    previous_cpu = _Per_CPU_Get_by_index( cpu_index - 1 );
    _Per_CPU_Release( cpu, &previous_cpu->Lock_context );
    cpu = previous_cpu;
  }

  _Per_CPU_Release( cpu, lock_context );
  _ISR_lock_ISR_enable( lock_context );
#else
  /* Uniprocessor configurations only need the interrupt enable. */
  _ISR_lock_ISR_enable( lock_context );
#endif
}
---|
668 | |
---|
#if defined( RTEMS_SMP )

/**
 * @brief Allocate and Initialize Per CPU Structures
 *
 * This method allocates and initialize the per CPU structure.
 */
void _Per_CPU_Initialize(void);

/**
 * @brief Changes the state of the processor.
 *
 * This is the only valid way to initiate a processor state change, see the
 * Per_CPU_State documentation above.  This function may not return in case
 * someone requested a shutdown.
 *
 * @param[in,out] cpu The per-CPU control of the processor to change.
 * @param[in] new_state The requested new state.
 */
void _Per_CPU_State_change(
  Per_CPU_Control *cpu,
  Per_CPU_State new_state
);

/**
 * @brief Waits for a processor to change into a non-initial state.
 *
 * This function should be called only in _CPU_SMP_Start_processor() if
 * required by the CPU port or BSP.
 *
 * @code
 * bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 * {
 *   uint32_t timeout = 123456;
 *
 *   start_the_processor(cpu_index);
 *
 *   return _Per_CPU_State_wait_for_non_initial_state(cpu_index, timeout);
 * }
 * @endcode
 *
 * @param[in] cpu_index The processor index.
 * @param[in] timeout_in_ns The timeout in nanoseconds.  Use a value of zero to
 *   wait forever if necessary.
 *
 * @retval true The processor is in a non-initial state.
 * @retval false The timeout expired before the processor reached a non-initial
 *   state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
);

#endif /* defined( RTEMS_SMP ) */
---|
714 | |
---|
/*
 * Convenience accessors for members of the current processor's per-CPU
 * control.
 *
 * On a non SMP system, the _SMP_Get_current_processor() is defined to 0.
 * Thus when built for non-SMP, there should be no performance penalty.
 */
#define _Thread_Dispatch_disable_level \
  _Per_CPU_Get()->thread_dispatch_disable_level
#define _Thread_Heir \
  _Per_CPU_Get()->heir

/* Prefer a CPU port specific accessor for the executing thread if provided. */
#if defined(_CPU_Get_thread_executing)
#define _Thread_Executing \
  _CPU_Get_thread_executing()
#else
#define _Thread_Executing \
  _Per_CPU_Get_executing( _Per_CPU_Get() )
#endif

#define _ISR_Nest_level \
  _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
  _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
  _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
  _Per_CPU_Get()->dispatch_necessary
---|
740 | |
---|
741 | /** |
---|
742 | * @brief Returns the thread control block of the executing thread. |
---|
743 | * |
---|
744 | * This function can be called in any thread context. On SMP configurations, |
---|
745 | * interrupts are disabled to ensure that the processor index is used |
---|
746 | * consistently if no CPU port specific method is available to get the |
---|
747 | * executing thread. |
---|
748 | * |
---|
749 | * @return The thread control block of the executing thread. |
---|
750 | */ |
---|
RTEMS_INLINE_ROUTINE struct _Thread_Control *_Thread_Get_executing( void )
{
  struct _Thread_Control *executing;

#if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
  ISR_Level level;

  /*
   * Disable interrupts so that the processor index used by
   * _Thread_Executing stays consistent while the executing member is read,
   * see the function documentation above.
   */
  _ISR_Local_disable( level );
#endif

  executing = _Thread_Executing;

#if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
  _ISR_Local_enable( level );
#endif

  return executing;
}
---|
769 | |
---|
770 | /**@}*/ |
---|
771 | |
---|
772 | #endif /* !defined( ASM ) */ |
---|
773 | |
---|
#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )

/*
 * Byte offsets of Per_CPU_Control members required by assembler code.  They
 * must be kept in sync with the member layout of Per_CPU_Control above,
 * including the alignment padding before the executing member.
 */
#define PER_CPU_INTERRUPT_STACK_LOW \
  CPU_PER_CPU_CONTROL_SIZE
#define PER_CPU_INTERRUPT_STACK_HIGH \
  PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER

/* Addresses of the interrupt stack members for processor 0. */
#define INTERRUPT_STACK_LOW \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
#define INTERRUPT_STACK_HIGH \
  (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)

/*
 * These are the offsets of the required elements in the per CPU table.
 */
#define PER_CPU_ISR_NEST_LEVEL \
  PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
#define PER_CPU_ISR_DISPATCH_DISABLE \
  PER_CPU_ISR_NEST_LEVEL + 4
#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
  PER_CPU_ISR_DISPATCH_DISABLE + 4
#define PER_CPU_DISPATCH_NEEDED \
  PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
#define PER_CPU_OFFSET_EXECUTING \
  PER_CPU_DISPATCH_NEEDED + 4
#define PER_CPU_OFFSET_HEIR \
  PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
#if defined(RTEMS_SMP)
#define PER_CPU_INTERRUPT_FRAME_AREA \
  PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
#endif

/* Addresses of the dispatch related members for processor 0. */
#define THREAD_DISPATCH_DISABLE_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
#define ISR_NEST_LEVEL \
  (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
#define DISPATCH_NEEDED \
  (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)

#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
---|
814 | |
---|
815 | #ifdef __cplusplus |
---|
816 | } |
---|
817 | #endif |
---|
818 | |
---|
819 | #endif |
---|
820 | /* end of include file */ |
---|