1 | /** |
---|
2 | * @file |
---|
3 | * |
---|
4 | * @brief Inlined Routines from the Thread Handler |
---|
5 | * |
---|
6 | * This file contains the macro implementation of the inlined |
---|
7 | * routines from the Thread handler. |
---|
8 | */ |
---|
9 | |
---|
10 | /* |
---|
11 | * COPYRIGHT (c) 1989-2008. |
---|
12 | * On-Line Applications Research Corporation (OAR). |
---|
13 | * |
---|
14 | * Copyright (c) 2014-2015 embedded brains GmbH. |
---|
15 | * |
---|
16 | * The license and distribution terms for this file may be |
---|
17 | * found in the file LICENSE in this distribution or at |
---|
18 | * http://www.rtems.org/license/LICENSE. |
---|
19 | */ |
---|
20 | |
---|
21 | #ifndef _RTEMS_SCORE_THREADIMPL_H |
---|
22 | #define _RTEMS_SCORE_THREADIMPL_H |
---|
23 | |
---|
24 | #include <rtems/score/thread.h> |
---|
25 | #include <rtems/score/assert.h> |
---|
26 | #include <rtems/score/chainimpl.h> |
---|
27 | #include <rtems/score/interr.h> |
---|
28 | #include <rtems/score/isr.h> |
---|
29 | #include <rtems/score/objectimpl.h> |
---|
30 | #include <rtems/score/resourceimpl.h> |
---|
31 | #include <rtems/score/statesimpl.h> |
---|
32 | #include <rtems/score/status.h> |
---|
33 | #include <rtems/score/sysstate.h> |
---|
34 | #include <rtems/score/threadqimpl.h> |
---|
35 | #include <rtems/score/todimpl.h> |
---|
36 | #include <rtems/score/freechain.h> |
---|
37 | #include <rtems/score/watchdogimpl.h> |
---|
38 | #include <rtems/config.h> |
---|
39 | |
---|
40 | #ifdef __cplusplus |
---|
41 | extern "C" { |
---|
42 | #endif |
---|
43 | |
---|
44 | /** |
---|
45 | * @addtogroup ScoreThread |
---|
46 | */ |
---|
47 | /**@{**/ |
---|
48 | |
---|
49 | /** |
---|
50 | * The following structure contains the information necessary to manage |
---|
51 | * a thread which is waiting for a resource. |
---|
52 | */ |
---|
53 | #define THREAD_STATUS_PROXY_BLOCKING 0x1111111 |
---|
54 | |
---|
55 | /** |
---|
56 | * Self for the GNU Ada Run-Time |
---|
57 | */ |
---|
58 | extern void *rtems_ada_self; |
---|
59 | |
---|
60 | typedef struct { |
---|
61 | Objects_Information Objects; |
---|
62 | |
---|
63 | Freechain_Control Free_thread_queue_heads; |
---|
64 | } Thread_Information; |
---|
65 | |
---|
66 | /** |
---|
67 | * The following defines the information control block used to |
---|
68 | * manage this class of objects. |
---|
69 | */ |
---|
70 | extern Thread_Information _Thread_Internal_information; |
---|
71 | |
---|
72 | /** |
---|
73 | * The following points to the thread whose floating point |
---|
74 | * context is currently loaded. |
---|
75 | */ |
---|
76 | #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) |
---|
77 | extern Thread_Control *_Thread_Allocated_fp; |
---|
78 | #endif |
---|
79 | |
---|
80 | #define THREAD_CHAIN_NODE_TO_THREAD( node ) \ |
---|
81 | RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.Chain ) |
---|
82 | |
---|
83 | #define THREAD_RBTREE_NODE_TO_THREAD( node ) \ |
---|
84 | RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.RBTree ) |
---|
85 | |
---|
86 | #if defined(RTEMS_SMP) |
---|
87 | #define THREAD_RESOURCE_NODE_TO_THREAD( node ) \ |
---|
88 | RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node ) |
---|
89 | #endif |
---|
90 | |
---|
91 | void _Thread_Initialize_information( |
---|
92 | Thread_Information *information, |
---|
93 | Objects_APIs the_api, |
---|
94 | uint16_t the_class, |
---|
95 | uint32_t maximum, |
---|
96 | bool is_string, |
---|
97 | uint32_t maximum_name_length |
---|
98 | ); |
---|
99 | |
---|
100 | /** |
---|
101 | * @brief Initialize thread handler. |
---|
102 | * |
---|
103 | * This routine performs the initialization necessary for this handler. |
---|
104 | */ |
---|
105 | void _Thread_Handler_initialization(void); |
---|
106 | |
---|
107 | /** |
---|
108 | * @brief Create idle thread. |
---|
109 | * |
---|
110 | * This routine creates the idle thread. |
---|
111 | * |
---|
112 | * @warning No thread should be created before this one. |
---|
113 | */ |
---|
114 | void _Thread_Create_idle(void); |
---|
115 | |
---|
116 | /** |
---|
117 | * @brief Start thread multitasking. |
---|
118 | * |
---|
119 | * This routine initiates multitasking. It is invoked only as |
---|
120 | * part of initialization and its invocation is the last act of |
---|
121 | * the non-multitasking part of the system initialization. |
---|
122 | */ |
---|
123 | void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN; |
---|
124 | |
---|
125 | /** |
---|
126 | * @brief Allocate the requested stack space for the thread. |
---|
127 | * |
---|
128 | * Allocate the requested stack space for the thread. |
---|
129 | * Set the Start.stack field to the address of the stack. |
---|
130 | * |
---|
131 | * @param[in] the_thread is the thread where the stack space is requested |
---|
132 | * @param[in] stack_size is the stack space is requested |
---|
133 | * |
---|
134 | * @retval actual size allocated after any adjustment |
---|
135 | * @retval zero if the allocation failed |
---|
136 | */ |
---|
137 | size_t _Thread_Stack_Allocate( |
---|
138 | Thread_Control *the_thread, |
---|
139 | size_t stack_size |
---|
140 | ); |
---|
141 | |
---|
142 | /** |
---|
143 | * @brief Deallocate thread stack. |
---|
144 | * |
---|
145 | * Deallocate the Thread's stack. |
---|
146 | */ |
---|
147 | void _Thread_Stack_Free( |
---|
148 | Thread_Control *the_thread |
---|
149 | ); |
---|
150 | |
---|
151 | /** |
---|
152 | * @brief Initialize thread. |
---|
153 | * |
---|
154 | * This routine initializes the specified the thread. It allocates |
---|
155 | * all memory associated with this thread. It completes by adding |
---|
156 | * the thread to the local object table so operations on this |
---|
157 | * thread id are allowed. |
---|
158 | * |
---|
159 | * @note If stack_area is NULL, it is allocated from the workspace. |
---|
160 | * |
---|
161 | * @note If the stack is allocated from the workspace, then it is |
---|
162 | * guaranteed to be of at least minimum size. |
---|
163 | */ |
---|
164 | bool _Thread_Initialize( |
---|
165 | Thread_Information *information, |
---|
166 | Thread_Control *the_thread, |
---|
167 | const struct Scheduler_Control *scheduler, |
---|
168 | void *stack_area, |
---|
169 | size_t stack_size, |
---|
170 | bool is_fp, |
---|
171 | Priority_Control priority, |
---|
172 | bool is_preemptible, |
---|
173 | Thread_CPU_budget_algorithms budget_algorithm, |
---|
174 | Thread_CPU_budget_algorithm_callout budget_callout, |
---|
175 | uint32_t isr_level, |
---|
176 | Objects_Name name |
---|
177 | ); |
---|
178 | |
---|
179 | /** |
---|
180 | * @brief Initializes thread and executes it. |
---|
181 | * |
---|
182 | * This routine initializes the executable information for a thread |
---|
183 | * and makes it ready to execute. After this routine executes, the |
---|
184 | * thread competes with all other threads for CPU time. |
---|
185 | * |
---|
186 | * @param the_thread The thread to be started. |
---|
187 | * @param entry The thread entry information. |
---|
188 | */ |
---|
189 | bool _Thread_Start( |
---|
190 | Thread_Control *the_thread, |
---|
191 | const Thread_Entry_information *entry, |
---|
192 | ISR_lock_Context *lock_context |
---|
193 | ); |
---|
194 | |
---|
195 | void _Thread_Restart_self( |
---|
196 | Thread_Control *executing, |
---|
197 | const Thread_Entry_information *entry, |
---|
198 | ISR_lock_Context *lock_context |
---|
199 | ) RTEMS_NO_RETURN; |
---|
200 | |
---|
201 | bool _Thread_Restart_other( |
---|
202 | Thread_Control *the_thread, |
---|
203 | const Thread_Entry_information *entry, |
---|
204 | ISR_lock_Context *lock_context |
---|
205 | ); |
---|
206 | |
---|
207 | void _Thread_Yield( Thread_Control *executing ); |
---|
208 | |
---|
209 | Thread_Life_state _Thread_Change_life( |
---|
210 | Thread_Life_state clear, |
---|
211 | Thread_Life_state set, |
---|
212 | Thread_Life_state ignore |
---|
213 | ); |
---|
214 | |
---|
215 | Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state ); |
---|
216 | |
---|
217 | /** |
---|
218 | * @brief Kills all zombie threads in the system. |
---|
219 | * |
---|
220 | * Threads change into the zombie state as the last step in the thread |
---|
221 | * termination sequence right before a context switch to the heir thread is |
---|
222 | * initiated. Since the thread stack is still in use during this phase we have |
---|
223 | * to postpone the thread stack reclamation until this point. On SMP |
---|
224 | * configurations we may have to busy wait for context switch completion here. |
---|
225 | */ |
---|
226 | void _Thread_Kill_zombies( void ); |
---|
227 | |
---|
228 | void _Thread_Exit( |
---|
229 | Thread_Control *executing, |
---|
230 | Thread_Life_state set, |
---|
231 | void *exit_value |
---|
232 | ); |
---|
233 | |
---|
234 | void _Thread_Join( |
---|
235 | Thread_Control *the_thread, |
---|
236 | States_Control waiting_for_join, |
---|
237 | Thread_Control *executing, |
---|
238 | Thread_queue_Context *queue_context |
---|
239 | ); |
---|
240 | |
---|
241 | void _Thread_Cancel( |
---|
242 | Thread_Control *the_thread, |
---|
243 | Thread_Control *executing, |
---|
244 | void *exit_value |
---|
245 | ); |
---|
246 | |
---|
247 | /** |
---|
248 | * @brief Closes the thread. |
---|
249 | * |
---|
250 | * Closes the thread object and starts the thread termination sequence. In |
---|
251 | * case the executing thread is not terminated, then this function waits until |
---|
252 | * the terminating thread reached the zombie state. |
---|
253 | */ |
---|
254 | void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing ); |
---|
255 | |
---|
/**
 * @brief Returns true if the thread is in the ready state, and false
 * otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
{
  return _States_Is_ready( the_thread->current_state );
}
---|
260 | |
---|
261 | States_Control _Thread_Clear_state_locked( |
---|
262 | Thread_Control *the_thread, |
---|
263 | States_Control state |
---|
264 | ); |
---|
265 | |
---|
266 | /** |
---|
267 | * @brief Clears the specified thread state. |
---|
268 | * |
---|
269 | * In case the previous state is a non-ready state and the next state is the |
---|
270 | * ready state, then the thread is unblocked by the scheduler. |
---|
271 | * |
---|
272 | * @param[in] the_thread The thread. |
---|
273 | * @param[in] state The state to clear. It must not be zero. |
---|
274 | * |
---|
275 | * @return The previous state. |
---|
276 | */ |
---|
277 | States_Control _Thread_Clear_state( |
---|
278 | Thread_Control *the_thread, |
---|
279 | States_Control state |
---|
280 | ); |
---|
281 | |
---|
282 | States_Control _Thread_Set_state_locked( |
---|
283 | Thread_Control *the_thread, |
---|
284 | States_Control state |
---|
285 | ); |
---|
286 | |
---|
287 | /** |
---|
288 | * @brief Sets the specified thread state. |
---|
289 | * |
---|
290 | * In case the previous state is the ready state, then the thread is blocked by |
---|
291 | * the scheduler. |
---|
292 | * |
---|
293 | * @param[in] the_thread The thread. |
---|
294 | * @param[in] state The state to set. It must not be zero. |
---|
295 | * |
---|
296 | * @return The previous state. |
---|
297 | */ |
---|
298 | States_Control _Thread_Set_state( |
---|
299 | Thread_Control *the_thread, |
---|
300 | States_Control state |
---|
301 | ); |
---|
302 | |
---|
303 | /** |
---|
304 | * @brief Initializes the environment for a thread. |
---|
305 | * |
---|
306 | * This routine initializes the context of @a the_thread to its |
---|
307 | * appropriate starting state. |
---|
308 | * |
---|
309 | * @param[in] the_thread is the pointer to the thread control block. |
---|
310 | */ |
---|
311 | void _Thread_Load_environment( |
---|
312 | Thread_Control *the_thread |
---|
313 | ); |
---|
314 | |
---|
315 | void _Thread_Entry_adaptor_idle( Thread_Control *executing ); |
---|
316 | |
---|
317 | void _Thread_Entry_adaptor_numeric( Thread_Control *executing ); |
---|
318 | |
---|
319 | void _Thread_Entry_adaptor_pointer( Thread_Control *executing ); |
---|
320 | |
---|
321 | /** |
---|
322 | * @brief Wrapper function for all threads. |
---|
323 | * |
---|
324 | * This routine is the wrapper function for all threads. It is |
---|
325 | * the starting point for all threads. The user provided thread |
---|
326 | * entry point is invoked by this routine. Operations |
---|
327 | * which must be performed immediately before and after the user's |
---|
328 | * thread executes are found here. |
---|
329 | * |
---|
330 | * @note On entry, it is assumed all interrupts are blocked and that this |
---|
331 | * routine needs to set the initial isr level. This may or may not |
---|
332 | * actually be needed by the context switch routine and as a result |
---|
333 | * interrupts may already be at their proper level. Either way, |
---|
334 | * setting the initial isr level properly here is safe. |
---|
335 | */ |
---|
336 | void _Thread_Handler( void ); |
---|
337 | |
---|
338 | /** |
---|
339 | * @brief Executes the global constructors and then restarts itself as the |
---|
340 | * first initialization thread. |
---|
341 | * |
---|
342 | * The first initialization thread is the first RTEMS initialization task or |
---|
343 | * the first POSIX initialization thread in case no RTEMS initialization tasks |
---|
344 | * are present. |
---|
345 | */ |
---|
346 | void _Thread_Global_construction( |
---|
347 | Thread_Control *executing, |
---|
348 | const Thread_Entry_information *entry |
---|
349 | ) RTEMS_NO_RETURN; |
---|
350 | |
---|
351 | /** |
---|
352 | * @brief Ends the delay of a thread. |
---|
353 | * |
---|
354 | * This routine is invoked when a thread must be unblocked at the |
---|
355 | * end of a time based delay (i.e. wake after or wake when). |
---|
356 | * It is called by the watchdog handler. |
---|
357 | * |
---|
358 | * @param[in] id is the thread id |
---|
359 | * @param[in] ignored is not used |
---|
360 | */ |
---|
361 | void _Thread_Delay_ended( |
---|
362 | Objects_Id id, |
---|
363 | void *ignored |
---|
364 | ); |
---|
365 | |
---|
/**
 * @brief Acquires the thread state lock with interrupts already disabled.
 *
 * The thread state is protected by the lock of the thread join queue.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for the corresponding release.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Acquire_critical( &the_thread->Join_queue, lock_context );
}
---|
373 | |
---|
/**
 * @brief Disables interrupts and acquires the thread state lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for the corresponding release.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Acquire( &the_thread->Join_queue, lock_context );
}
---|
381 | |
---|
/**
 * @brief Disables interrupts, returns the executing thread of the current
 * processor and acquires its state lock.
 *
 * Interrupts must be disabled before the executing thread is read, otherwise
 * the thread dispatch could change the executing thread underneath us.
 *
 * @param[in] lock_context The lock context for the corresponding release.
 *
 * @return The executing thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_State_acquire_critical( executing, lock_context );

  return executing;
}
---|
394 | |
---|
/**
 * @brief Releases the thread state lock without restoring the interrupt
 * status.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding
 *   acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Release_critical( &the_thread->Join_queue, lock_context );
}
---|
402 | |
---|
/**
 * @brief Releases the thread state lock and restores the interrupt status.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding
 *   acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Release( &the_thread->Join_queue, lock_context );
}
---|
410 | |
---|
411 | #if defined(RTEMS_DEBUG) |
---|
/**
 * @brief Returns true if the executing thread owns the state lock of the
 * specified thread, and false otherwise.
 *
 * Available in debug configurations only, to support lock ownership
 * assertions.
 */
RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
  const Thread_Control *the_thread
)
{
  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
}
---|
418 | #endif |
---|
419 | |
---|
420 | /** |
---|
421 | * @brief Returns true if the left thread priority is less than the right |
---|
422 | * thread priority in the intuitive sense of priority and false otherwise. |
---|
423 | */ |
---|
424 | RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than( |
---|
425 | Priority_Control left, |
---|
426 | Priority_Control right |
---|
427 | ) |
---|
428 | { |
---|
429 | return left > right; |
---|
430 | } |
---|
431 | |
---|
432 | /** |
---|
433 | * @brief Returns the highest priority of the left and right thread priorities |
---|
434 | * in the intuitive sense of priority. |
---|
435 | */ |
---|
436 | RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest( |
---|
437 | Priority_Control left, |
---|
438 | Priority_Control right |
---|
439 | ) |
---|
440 | { |
---|
441 | return _Thread_Priority_less_than( left, right ) ? right : left; |
---|
442 | } |
---|
443 | |
---|
444 | /** |
---|
445 | * @brief Filters a thread priority change. |
---|
446 | * |
---|
447 | * Called by _Thread_Change_priority() under the protection of the thread lock. |
---|
448 | * |
---|
449 | * @param[in] the_thread The thread. |
---|
450 | * @param[in, out] new_priority The new priority of the thread. The filter may |
---|
451 | * alter this value. |
---|
452 | * @param[in] arg The argument passed to _Thread_Change_priority(). |
---|
453 | * |
---|
454 | * @retval true Change the current priority. |
---|
455 | * @retval false Otherwise. |
---|
456 | */ |
---|
457 | typedef bool ( *Thread_Change_priority_filter )( |
---|
458 | Thread_Control *the_thread, |
---|
459 | Priority_Control *new_priority, |
---|
460 | void *arg |
---|
461 | ); |
---|
462 | |
---|
463 | /** |
---|
464 | * @brief Changes the priority of a thread if allowed by the filter function. |
---|
465 | * |
---|
466 | * It changes current priority of the thread to the new priority in case the |
---|
467 | * filter function returns true. In this case the scheduler is notified of the |
---|
468 | * priority change as well. |
---|
469 | * |
---|
470 | * @param[in] the_thread The thread. |
---|
471 | * @param[in] new_priority The new priority of the thread. |
---|
472 | * @param[in] arg The argument for the filter function. |
---|
473 | * @param[in] filter The filter function to determine if a priority change is |
---|
474 | * allowed and optionally perform other actions under the protection of the |
---|
475 | * thread lock simultaneously with the update of the current priority. |
---|
476 | * @param[in] prepend_it In case this is true, then the thread is prepended to |
---|
477 | * its priority group in its scheduler instance, otherwise it is appended. |
---|
478 | */ |
---|
479 | void _Thread_Change_priority( |
---|
480 | Thread_Control *the_thread, |
---|
481 | Priority_Control new_priority, |
---|
482 | void *arg, |
---|
483 | Thread_Change_priority_filter filter, |
---|
484 | bool prepend_it |
---|
485 | ); |
---|
486 | |
---|
487 | /** |
---|
488 | * @brief Raises the priority of a thread. |
---|
489 | * |
---|
490 | * It changes the current priority of the thread to the new priority if the new |
---|
491 | * priority is higher than the current priority. In this case the thread is |
---|
492 | * appended to its new priority group in its scheduler instance. |
---|
493 | * |
---|
494 | * @param[in] the_thread The thread. |
---|
495 | * @param[in] new_priority The new priority of the thread. |
---|
496 | * |
---|
497 | * @see _Thread_Change_priority(). |
---|
498 | */ |
---|
499 | void _Thread_Raise_priority( |
---|
500 | Thread_Control *the_thread, |
---|
501 | Priority_Control new_priority |
---|
502 | ); |
---|
503 | |
---|
504 | /** |
---|
505 | * @brief Inherit the priority of a thread. |
---|
506 | * |
---|
507 | * It changes the current priority of the inheritor thread to the current priority |
---|
508 | * of the ancestor thread if it is higher than the current priority of the inheritor |
---|
509 | * thread. In this case the inheritor thread is appended to its new priority group |
---|
510 | * in its scheduler instance. |
---|
511 | * |
---|
512 | * On SMP configurations, the priority is changed to PRIORITY_PSEUDO_ISR in |
---|
513 | * case the own schedulers of the inheritor and ancestor thread differ (priority |
---|
514 | * boosting). |
---|
515 | * |
---|
516 | * @param[in] inheritor The thread to inherit the priority. |
---|
517 | * @param[in] ancestor The thread to bequeath its priority to the inheritor |
---|
518 | * thread. |
---|
519 | */ |
---|
520 | #if defined(RTEMS_SMP) |
---|
521 | void _Thread_Inherit_priority( |
---|
522 | Thread_Control *inheritor, |
---|
523 | Thread_Control *ancestor |
---|
524 | ); |
---|
525 | #else |
---|
RTEMS_INLINE_ROUTINE void _Thread_Inherit_priority(
  Thread_Control *inheritor,
  Thread_Control *ancestor
)
{
  /*
   * Uniprocessor variant: there is only one scheduler instance, so a plain
   * priority raise is sufficient (no priority boosting necessary).
   */
  _Thread_Raise_priority( inheritor, ancestor->current_priority );
}
---|
533 | #endif |
---|
534 | |
---|
535 | /** |
---|
536 | * @brief Sets the current to the real priority of a thread. |
---|
537 | * |
---|
538 | * Sets the priority restore hint to false. |
---|
539 | */ |
---|
540 | void _Thread_Restore_priority( Thread_Control *the_thread ); |
---|
541 | |
---|
542 | /** |
---|
543 | * @brief Sets the priority of a thread. |
---|
544 | * |
---|
545 | * It sets the real priority of the thread. In addition it changes the current |
---|
546 | * priority of the thread if the new priority is higher than the current |
---|
547 | * priority or the thread owns no resources. |
---|
548 | * |
---|
549 | * @param[in] the_thread The thread. |
---|
550 | * @param[in] new_priority The new priority of the thread. |
---|
551 | * @param[out] old_priority The old real priority of the thread. This pointer |
---|
552 | * must not be @c NULL. |
---|
553 | * @param[in] prepend_it In case this is true, then the thread is prepended to |
---|
554 | * its priority group in its scheduler instance, otherwise it is appended. |
---|
555 | * |
---|
556 | * @see _Thread_Change_priority(). |
---|
557 | */ |
---|
558 | void _Thread_Set_priority( |
---|
559 | Thread_Control *the_thread, |
---|
560 | Priority_Control new_priority, |
---|
561 | Priority_Control *old_priority, |
---|
562 | bool prepend_it |
---|
563 | ); |
---|
564 | |
---|
/**
 * @brief Returns the object information for the thread class of the API
 * encoded in the identifier.
 *
 * @param[in] id The object identifier.
 *
 * @retval NULL The API encoded in the identifier is invalid.
 * @retval other The object information of the thread class of this API.
 */
RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
  Objects_Id id
)
{
  uint32_t the_api;

  the_api = _Objects_Get_API( id );

  if ( !_Objects_Is_api_valid( the_api ) ) {
    return NULL;
  }

  /*
   * Threads are always first class :)
   *
   * There is no need to validate the object class of the object identifier,
   * since this will be done by the object get methods.
   */
  return _Objects_Information_table[ the_api ][ 1 ];
}
---|
585 | |
---|
586 | /** |
---|
587 | * @brief Gets a thread by its identifier. |
---|
588 | * |
---|
589 | * @see _Objects_Get(). |
---|
590 | */ |
---|
591 | Thread_Control *_Thread_Get( |
---|
592 | Objects_Id id, |
---|
593 | ISR_lock_Context *lock_context |
---|
594 | ); |
---|
595 | |
---|
/**
 * @brief Returns the processor assigned to the thread.
 *
 * On uniprocessor configurations this is always the current processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}
---|
608 | |
---|
/**
 * @brief Assigns the processor to the thread.
 *
 * On uniprocessor configurations this is a no-op.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control  *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
---|
621 | |
---|
622 | /** |
---|
623 | * This function returns true if the_thread is the currently executing |
---|
624 | * thread, and false otherwise. |
---|
625 | */ |
---|
626 | |
---|
627 | RTEMS_INLINE_ROUTINE bool _Thread_Is_executing ( |
---|
628 | const Thread_Control *the_thread |
---|
629 | ) |
---|
630 | { |
---|
631 | return ( the_thread == _Thread_Executing ); |
---|
632 | } |
---|
633 | |
---|
634 | #if defined(RTEMS_SMP) |
---|
635 | /** |
---|
636 | * @brief Returns @a true in case the thread executes currently on some |
---|
637 | * processor in the system, otherwise @a false. |
---|
638 | * |
---|
639 | * Do not confuse this with _Thread_Is_executing() which checks only the |
---|
640 | * current processor. |
---|
641 | */ |
---|
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  /* The context flag tells whether some processor currently executes it. */
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
---|
648 | #endif |
---|
649 | |
---|
650 | /** |
---|
651 | * This function returns true if the_thread is the heir |
---|
652 | * thread, and false otherwise. |
---|
653 | */ |
---|
654 | |
---|
655 | RTEMS_INLINE_ROUTINE bool _Thread_Is_heir ( |
---|
656 | const Thread_Control *the_thread |
---|
657 | ) |
---|
658 | { |
---|
659 | return ( the_thread == _Thread_Heir ); |
---|
660 | } |
---|
661 | |
---|
/**
 * @brief Clears all blocking states of the thread.
 *
 * This routine performs any necessary scheduling operations including the
 * selection of a new heir thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}
---|
674 | |
---|
675 | /** |
---|
676 | * This function returns true if the floating point context of |
---|
677 | * the_thread is currently loaded in the floating point unit, and |
---|
678 | * false otherwise. |
---|
679 | */ |
---|
680 | |
---|
681 | #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) |
---|
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  /* True if this thread's FP context is the one loaded in the FP unit. */
  return ( the_thread == _Thread_Allocated_fp );
}
---|
688 | #endif |
---|
689 | |
---|
690 | /* |
---|
691 | * If the CPU has hardware floating point, then we must address saving |
---|
692 | * and restoring it as part of the context switch. |
---|
693 | * |
---|
694 | * The second conditional compilation section selects the algorithm used |
---|
695 | * to context switch between floating point tasks. The deferred algorithm |
---|
696 | * can be significantly better in a system with few floating point tasks |
---|
697 | * because it reduces the total number of save and restore FP context |
---|
698 | * operations. However, this algorithm can not be used on all CPUs due |
---|
699 | * to unpredictable use of FP registers by some compilers for integer |
---|
700 | * operations. |
---|
701 | */ |
---|
702 | |
---|
/**
 * @brief Saves the floating point context of the executing thread, if any.
 *
 * With the deferred floating point switch algorithm the save is postponed
 * until _Thread_Restore_fp() of a later thread needs the unit, so nothing is
 * done here in that configuration.
 */
RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}
---|
712 | |
---|
/**
 * @brief Restores the floating point context of the executing thread, if any.
 *
 * With the deferred floating point switch algorithm the restore happens only
 * when the executing thread uses the floating point unit and its context is
 * not already loaded; the context of the previous owner is saved first and
 * the executing thread becomes the new owner.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}
---|
730 | |
---|
731 | /** |
---|
732 | * This routine is invoked when the currently loaded floating |
---|
733 | * point context is no longer associated with an active thread. |
---|
734 | */ |
---|
735 | |
---|
736 | #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE ) |
---|
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  /* The currently loaded FP context no longer belongs to an active thread. */
  _Thread_Allocated_fp = NULL;
}
---|
741 | #endif |
---|
742 | |
---|
743 | /** |
---|
744 | * This function returns true if dispatching is disabled, and false |
---|
745 | * otherwise. |
---|
746 | */ |
---|
747 | |
---|
RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  /* Set whenever a thread dispatch is pending on the current processor. */
  return ( _Thread_Dispatch_necessary );
}
---|
752 | |
---|
753 | /** |
---|
754 | * This function returns true if the_thread is NULL and false otherwise. |
---|
755 | */ |
---|
756 | |
---|
757 | RTEMS_INLINE_ROUTINE bool _Thread_Is_null ( |
---|
758 | const Thread_Control *the_thread |
---|
759 | ) |
---|
760 | { |
---|
761 | return ( the_thread == NULL ); |
---|
762 | } |
---|
763 | |
---|
764 | /** |
---|
765 | * @brief Is proxy blocking. |
---|
766 | * |
---|
767 | * status which indicates that a proxy is blocking, and false otherwise. |
---|
768 | */ |
---|
769 | RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking ( |
---|
770 | uint32_t code |
---|
771 | ) |
---|
772 | { |
---|
773 | return (code == THREAD_STATUS_PROXY_BLOCKING); |
---|
774 | } |
---|
775 | |
---|
/**
 * @brief Returns the maximum number of internal threads.
 *
 * This is one idle thread per configured processor, plus the MPCI receive
 * thread in multiprocessing configurations.
 */
RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
{
  /* Idle threads */
  uint32_t maximum_internal_threads =
    rtems_configuration_get_maximum_processors();

  /* MPCI thread */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    ++maximum_internal_threads;
  }
#endif

  return maximum_internal_threads;
}
---|
791 | |
---|
/**
 * @brief Allocates an internal thread object without allocator protection.
 *
 * The caller must provide the appropriate object allocator protection.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}
---|
797 | |
---|
798 | /** |
---|
799 | * @brief Gets the heir of the processor and makes it executing. |
---|
800 | * |
---|
801 | * Must be called with interrupts disabled. The thread dispatch necessary |
---|
802 | * indicator is cleared as a side-effect. |
---|
803 | * |
---|
804 | * @return The heir thread. |
---|
805 | * |
---|
806 | * @see _Thread_Dispatch(), _Thread_Start_multitasking() and |
---|
807 | * _Thread_Dispatch_update_heir(). |
---|
808 | */ |
---|
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  heir = cpu_self->heir;
  /* Clear the dispatch indicator before the heir becomes the executing
   * thread; must be called with interrupts disabled (see @brief above). */
  cpu_self->dispatch_necessary = false;
  cpu_self->executing = heir;

  return heir;
}
---|
821 | |
---|
/**
 * @brief Charges the processor time consumed since the last update to the
 * thread and restarts the processor usage timestamp.
 *
 * @param[in] the_thread The thread to charge with the elapsed time.
 * @param[in] cpu The processor whose usage timestamp is updated.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  /* Measure uptime elapsed since the previous usage timestamp. */
  last = cpu->cpu_usage_timestamp;
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}
---|
835 | |
---|
#if defined( RTEMS_SMP )
/**
 * @brief Updates the heir of a processor and requests a thread dispatch.
 *
 * The CPU time consumed by the current heir is accounted before it is
 * replaced.
 *
 * @param[in] cpu_self The per-CPU control of the current processor.
 * @param[in] cpu_for_heir The per-CPU control which gets the new heir.
 * @param[in] heir The new heir thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
}
#endif
---|
850 | |
---|
851 | void _Thread_Get_CPU_time_used( |
---|
852 | Thread_Control *the_thread, |
---|
853 | Timestamp_Control *cpu_time_used |
---|
854 | ); |
---|
855 | |
---|
/**
 * @brief Initializes a thread action control as an empty action chain.
 *
 * @param[in] action_control The thread action control to initialize.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}
---|
862 | |
---|
/**
 * @brief Initializes a thread action.
 *
 * The node is set off chain so a later append-if-off-chain is a no-op for an
 * already pending action.
 *
 * @param[in] action The thread action to initialize.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}
---|
869 | |
---|
/**
 * @brief Adds a post-switch action to a thread and requests a dispatch on its
 * processor.
 *
 * If the action is already on the chain, the append is a no-op, but the
 * dispatch request is still issued.
 *
 * NOTE(review): the dispatch request is made before the chain append —
 * presumably safe because the caller owns the thread state lock (see the
 * assertion); confirm against the dispatch code.
 *
 * @param[in] the_thread The thread receiving the action.
 * @param[in] action The action control to append.
 * @param[in] handler The handler invoked during the post-switch processing.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;

  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
---|
891 | |
---|
/**
 * @brief Returns true if the life state indicates a pending restart.
 *
 * @param[in] life_state The thread life state to test.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}
---|
898 | |
---|
/**
 * @brief Returns true if the life state indicates a pending termination.
 *
 * @param[in] life_state The thread life state to test.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}
---|
905 | |
---|
906 | RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed( |
---|
907 | Thread_Life_state life_state |
---|
908 | ) |
---|
909 | { |
---|
910 | return ( life_state |
---|
911 | & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0; |
---|
912 | } |
---|
913 | |
---|
914 | RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing( |
---|
915 | Thread_Life_state life_state |
---|
916 | ) |
---|
917 | { |
---|
918 | return ( life_state |
---|
919 | & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0; |
---|
920 | } |
---|
921 | |
---|
/**
 * @brief Returns true if the thread is joinable, this is it is not detached.
 *
 * The caller must own the thread state lock (see the assertion).
 *
 * @param[in] the_thread The thread to test.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
  const Thread_Control *the_thread
)
{
  _Assert( _Thread_State_is_owner( the_thread ) );
  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
}
---|
929 | |
---|
930 | /** |
---|
931 | * @brief Returns true if the thread owns resources, and false otherwise. |
---|
932 | * |
---|
933 | * Resources are accounted with the Thread_Control::resource_count resource |
---|
934 | * counter. This counter is used by semaphore objects for example. |
---|
935 | * |
---|
936 | * In addition to the resource counter there is a resource dependency tree |
---|
937 | * available on SMP configurations. In case this tree is non-empty, then the |
---|
938 | * thread owns resources. |
---|
939 | * |
---|
940 | * @param[in] the_thread The thread. |
---|
941 | */ |
---|
942 | RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources( |
---|
943 | const Thread_Control *the_thread |
---|
944 | ) |
---|
945 | { |
---|
946 | bool owns_resources = the_thread->resource_count != 0; |
---|
947 | |
---|
948 | #if defined(RTEMS_SMP) |
---|
949 | owns_resources = owns_resources |
---|
950 | || _Resource_Node_owns_resources( &the_thread->Resource_node ); |
---|
951 | #endif |
---|
952 | |
---|
953 | return owns_resources; |
---|
954 | } |
---|
955 | |
---|
/**
 * @brief Acquires the default thread lock inside a critical section
 * (interrupts disabled).
 *
 * On uni-processor configurations the disabled interrupts are the protection,
 * so this is a no-op apart from the assertion.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Callers must have interrupts disabled already. */
  _Assert( _ISR_Get_level() != 0 );
#if defined(RTEMS_SMP)
  _SMP_ticket_lock_Acquire(
    &the_thread->Lock.Default,
    &_Thread_Executing->Lock.Stats,
    &lock_context->Lock_context.Stats_context
  );
#else
  (void) the_thread;
  (void) lock_context;
#endif
}
---|
983 | |
---|
/**
 * @brief Acquires the default thread lock and returns the executing thread.
 *
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @return The executing thread.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Lock_acquire_default_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  /* Disable interrupts first so the executing thread cannot change under us. */
  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_Lock_acquire_default_critical( executing, lock_context );

  return executing;
}
---|
1006 | |
---|
/**
 * @brief Acquires the default thread lock.
 *
 * Disables interrupts and then acquires the default lock, see
 * _Thread_Lock_acquire_default_critical().
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_Lock_acquire_default_critical( the_thread, lock_context );
}
---|
1024 | |
---|
/**
 * @brief Releases the thread lock inside a critical section (interrupts
 * disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] lock The lock.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_critical(
  void             *lock,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  _SMP_ticket_lock_Release(
    (SMP_ticket_lock_Control *) lock,
    &lock_context->Lock_context.Stats_context
  );
#else
  /* Uni-processor: the interrupt disable is the lock, nothing to release. */
  (void) lock;
  (void) lock_context;
#endif
}
---|
1050 | |
---|
/**
 * @brief Releases the thread lock and restores the interrupt status.
 *
 * @param[in] lock The lock returned by _Thread_Lock_acquire().
 * @param[in] lock_context The lock context used for _Thread_Lock_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release(
  void             *lock,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_critical( lock, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
---|
1065 | |
---|
/**
 * @brief Releases the default thread lock inside a critical section
 * (interrupts disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_critical(
#if defined(RTEMS_SMP)
    &the_thread->Lock.Default,
#else
    /* No lock object exists on uni-processor configurations. */
    NULL,
#endif
    lock_context
  );
}
---|
1090 | |
---|
/**
 * @brief Releases the default thread lock and restores the interrupt status.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_default_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
---|
1106 | |
---|
/**
 * @brief Acquires the thread lock.
 *
 * On SMP configurations the current thread lock may change while we try to
 * acquire it, so the acquire is retried until the lock read before and after
 * the acquire is the same one.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for _Thread_Lock_release().
 *
 * @return The lock required by _Thread_Lock_release().
 */
RTEMS_INLINE_ROUTINE void *_Thread_Lock_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  SMP_ticket_lock_Control *lock_0;

  while ( true ) {
    SMP_ticket_lock_Control *lock_1;

    _ISR_lock_ISR_disable( lock_context );

    /*
     * We assume that a normal load of pointer is identical to a relaxed atomic
     * load.  Here, we may read an out-of-date lock.  However, only the owner
     * of this out-of-date lock is allowed to set a new one.  Thus, we read at
     * least this new lock ...
     */
    lock_0 = (SMP_ticket_lock_Control *) _Atomic_Load_uintptr(
      &the_thread->Lock.current.atomic,
      ATOMIC_ORDER_RELAXED
    );

    _SMP_ticket_lock_Acquire(
      lock_0,
      &_Thread_Executing->Lock.Stats,
      &lock_context->Lock_context.Stats_context
    );

    /*
     * We must use a load acquire here paired with the store release in
     * _Thread_Lock_set_unprotected() to observe corresponding thread wait
     * queue and thread wait operations.  It is important to do this after the
     * lock acquire, since we may have the following scenario.
     *
     * - We read the default lock and try to acquire it.
     * - The thread lock changes to a thread queue lock.
     * - The thread lock is restored to the default lock.
     * - We acquire the default lock and read it here again.
     * - Now, we must read the restored default thread wait queue and thread
     *   wait operations and this is not synchronized via the default thread
     *   lock.
     */
    lock_1 = (SMP_ticket_lock_Control *) _Atomic_Load_uintptr(
      &the_thread->Lock.current.atomic,
      ATOMIC_ORDER_ACQUIRE
    );

    /*
     * ... here, and so on.
     */
    if ( lock_0 == lock_1 ) {
      /* The lock did not change while we acquired it, we own it now. */
      return lock_0;
    }

    /* The thread lock changed meanwhile, drop the stale one and retry. */
    _Thread_Lock_release( lock_0, lock_context );
  }
#else
  /* Uni-processor: disabling interrupts is sufficient, there is no lock. */
  _ISR_Local_disable( lock_context->isr_level );

  return NULL;
#endif
}
---|
1179 | |
---|
#if defined(RTEMS_SMP)
/*
 * Internal function, use _Thread_Lock_set() or _Thread_Lock_restore_default()
 * instead.
 *
 * The store release pairs with the load acquire in _Thread_Lock_acquire() so
 * that observers of the new lock also observe the associated thread wait
 * queue and operations.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected(
  Thread_Control          *the_thread,
  SMP_ticket_lock_Control *new_lock
)
{
  _Atomic_Store_uintptr(
    &the_thread->Lock.current.atomic,
    (uintptr_t) new_lock,
    ATOMIC_ORDER_RELEASE
  );
}
#endif
---|
1197 | |
---|
/**
 * @brief Sets a new thread lock.
 *
 * The caller must not be the owner of the default thread lock.  The caller
 * must be the owner of the new lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_lock The new thread lock.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_set(
  Thread_Control          *the_thread,
  SMP_ticket_lock_Control *new_lock
)
{
  ISR_lock_Context lock_context;

  /* The default lock serializes lock replacement, see the assertion. */
  _Thread_Lock_acquire_default_critical( the_thread, &lock_context );
  _Assert( the_thread->Lock.current.normal == &the_thread->Lock.Default );
  _Thread_Lock_set_unprotected( the_thread, new_lock );
  _Thread_Lock_release_default_critical( the_thread, &lock_context );
}
#else
#define _Thread_Lock_set( the_thread, new_lock ) \
  do { } while ( 0 )
#endif
---|
1224 | |
---|
/**
 * @brief Restores the default thread lock.
 *
 * The caller must be the owner of the current thread lock.
 *
 * @param[in] the_thread The thread.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_restore_default(
  Thread_Control *the_thread
)
{
  _Thread_Lock_set_unprotected( the_thread, &the_thread->Lock.Default );
}
#else
#define _Thread_Lock_restore_default( the_thread ) \
  do { } while ( 0 )
#endif
---|
1243 | |
---|
1244 | /** |
---|
1245 | * @brief The initial thread wait flags value set by _Thread_Initialize(). |
---|
1246 | */ |
---|
1247 | #define THREAD_WAIT_FLAGS_INITIAL 0x0U |
---|
1248 | |
---|
1249 | /** |
---|
1250 | * @brief Mask to get the thread wait state flags. |
---|
1251 | */ |
---|
1252 | #define THREAD_WAIT_STATE_MASK 0xffU |
---|
1253 | |
---|
1254 | /** |
---|
1255 | * @brief Indicates that the thread begins with the blocking operation. |
---|
1256 | * |
---|
1257 | * A blocking operation consists of an optional watchdog initialization and the |
---|
1258 | * setting of the appropriate thread blocking state with the corresponding |
---|
1259 | * scheduler block operation. |
---|
1260 | */ |
---|
1261 | #define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U |
---|
1262 | |
---|
1263 | /** |
---|
1264 | * @brief Indicates that the thread completed the blocking operation. |
---|
1265 | */ |
---|
1266 | #define THREAD_WAIT_STATE_BLOCKED 0x2U |
---|
1267 | |
---|
1268 | /** |
---|
1269 | * @brief Indicates that a condition to end the thread wait occurred. |
---|
1270 | * |
---|
1271 | * This could be a timeout, a signal, an event or a resource availability. |
---|
1272 | */ |
---|
1273 | #define THREAD_WAIT_STATE_READY_AGAIN 0x4U |
---|
1274 | |
---|
1275 | /** |
---|
1276 | * @brief Mask to get the thread wait class flags. |
---|
1277 | */ |
---|
1278 | #define THREAD_WAIT_CLASS_MASK 0xff00U |
---|
1279 | |
---|
1280 | /** |
---|
1281 | * @brief Indicates that the thread waits for an event. |
---|
1282 | */ |
---|
1283 | #define THREAD_WAIT_CLASS_EVENT 0x100U |
---|
1284 | |
---|
1285 | /** |
---|
1286 | * @brief Indicates that the thread waits for a system event. |
---|
1287 | */ |
---|
1288 | #define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U |
---|
1289 | |
---|
1290 | /** |
---|
1291 | * @brief Indicates that the thread waits for an object. |
---|
1292 | */ |
---|
1293 | #define THREAD_WAIT_CLASS_OBJECT 0x400U |
---|
1294 | |
---|
1295 | /** |
---|
1296 | * @brief Indicates that the thread waits for a period. |
---|
1297 | */ |
---|
1298 | #define THREAD_WAIT_CLASS_PERIOD 0x800U |
---|
1299 | |
---|
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations a relaxed atomic store is used; callers needing
 * ordering use the try-change operations instead.
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The new thread wait flags.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
---|
1311 | |
---|
/**
 * @brief Returns the current thread wait flags.
 *
 * On SMP configurations a relaxed atomic load is used.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
---|
1322 | |
---|
/**
 * @brief Tries to change the thread wait flags with release semantics in case
 * of success.
 *
 * Must be called inside a critical section (interrupts disabled).
 *
 * In case the wait flags are equal to the expected wait flags, then the wait
 * flags are set to the desired wait flags.
 *
 * @param[in] the_thread The thread.
 * @param[in] expected_flags The expected wait flags.
 * @param[in] desired_flags The desired wait flags.
 *
 * @retval true The wait flags were equal to the expected wait flags.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  /* Interrupts must be disabled by the caller. */
  _Assert( _ISR_Get_level() != 0 );

#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_RELEASE,
    ATOMIC_ORDER_RELAXED
  );
#else
  /* With interrupts disabled a plain compare and store is atomic enough. */
  bool success = ( the_thread->Wait.flags == expected_flags );

  if ( success ) {
    the_thread->Wait.flags = desired_flags;
  }

  return success;
#endif
}
---|
1365 | |
---|
1366 | /** |
---|
1367 | * @brief Tries to change the thread wait flags with acquire semantics. |
---|
1368 | * |
---|
1369 | * In case the wait flags are equal to the expected wait flags, then the wait |
---|
1370 | * flags are set to the desired wait flags. |
---|
1371 | * |
---|
1372 | * @param[in] the_thread The thread. |
---|
1373 | * @param[in] expected_flags The expected wait flags. |
---|
1374 | * @param[in] desired_flags The desired wait flags. |
---|
1375 | * |
---|
1376 | * @retval true The wait flags were equal to the expected wait flags. |
---|
1377 | * @retval false Otherwise. |
---|
1378 | */ |
---|
1379 | RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire( |
---|
1380 | Thread_Control *the_thread, |
---|
1381 | Thread_Wait_flags expected_flags, |
---|
1382 | Thread_Wait_flags desired_flags |
---|
1383 | ) |
---|
1384 | { |
---|
1385 | bool success; |
---|
1386 | #if defined(RTEMS_SMP) |
---|
1387 | return _Atomic_Compare_exchange_uint( |
---|
1388 | &the_thread->Wait.flags, |
---|
1389 | &expected_flags, |
---|
1390 | desired_flags, |
---|
1391 | ATOMIC_ORDER_ACQUIRE, |
---|
1392 | ATOMIC_ORDER_ACQUIRE |
---|
1393 | ); |
---|
1394 | #else |
---|
1395 | ISR_Level level; |
---|
1396 | |
---|
1397 | _ISR_Local_disable( level ); |
---|
1398 | |
---|
1399 | success = _Thread_Wait_flags_try_change_release( |
---|
1400 | the_thread, |
---|
1401 | expected_flags, |
---|
1402 | desired_flags |
---|
1403 | ); |
---|
1404 | |
---|
1405 | _ISR_Local_enable( level ); |
---|
1406 | #endif |
---|
1407 | |
---|
1408 | return success; |
---|
1409 | } |
---|
1410 | |
---|
/**
 * @brief Sets the thread queue.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_queue The new queue.
 *
 * @see _Thread_Lock_set().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_queue(
  Thread_Control     *the_thread,
  Thread_queue_Queue *new_queue
)
{
  the_thread->Wait.queue = new_queue;
}
---|
1428 | |
---|
/**
 * @brief Sets the thread queue operations.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_operations The new queue operations.
 *
 * @see _Thread_Lock_set() and _Thread_Wait_restore_default_operations().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_operations(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *new_operations
)
{
  the_thread->Wait.operations = new_operations;
}
---|
1446 | |
---|
/**
 * @brief Restores the default thread queue operations.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Thread_Wait_set_operations().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default_operations(
  Thread_Control *the_thread
)
{
  the_thread->Wait.operations = &_Thread_queue_Operations_default;
}
---|
1462 | |
---|
1463 | /** |
---|
1464 | * @brief Returns the object identifier of the object containing the current |
---|
1465 | * thread wait queue. |
---|
1466 | * |
---|
1467 | * This function may be used for debug and system information purposes. The |
---|
1468 | * caller must be the owner of the thread lock. |
---|
1469 | * |
---|
1470 | * @retval 0 The thread waits on no thread queue currently, the thread wait |
---|
1471 | * queue is not contained in an object, or the current thread state provides |
---|
1472 | * insufficient information, e.g. the thread is in the middle of a blocking |
---|
1473 | * operation. |
---|
1474 | * @retval other The object identifier of the object containing the thread wait |
---|
1475 | * queue. |
---|
1476 | */ |
---|
1477 | Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread ); |
---|
1478 | |
---|
/**
 * @brief Returns the thread wait return code as a status code.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
  const Thread_Control *the_thread
)
{
  return (Status_Control) the_thread->Wait.return_code;
}
---|
1485 | |
---|
1486 | /** |
---|
1487 | * @brief General purpose thread wait timeout. |
---|
1488 | * |
---|
1489 | * @param[in] watchdog The thread timer watchdog. |
---|
1490 | */ |
---|
1491 | void _Thread_Timeout( Watchdog_Control *watchdog ); |
---|
1492 | |
---|
/**
 * @brief Initializes the thread timer information.
 *
 * The watchdog header defaults to the relative (ticks based) header of the
 * given processor.
 *
 * @param[in] timer The thread timer information to initialize.
 * @param[in] cpu The processor providing the watchdog header.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
---|
1502 | |
---|
/**
 * @brief Inserts the thread timer as a relative (ticks based) watchdog.
 *
 * The timer lock protects the header and routine updates against a
 * concurrent _Thread_Timer_remove().
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine on timeout.
 * @param[in] ticks The timeout in ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  Watchdog_Interval               ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
---|
1520 | |
---|
/**
 * @brief Inserts the thread timer as an absolute (expiration time based)
 * watchdog.
 *
 * The timer lock protects the header and routine updates against a
 * concurrent _Thread_Timer_remove().
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine on timeout.
 * @param[in] expire The absolute expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
---|
1538 | |
---|
/**
 * @brief Removes the thread timer watchdog.
 *
 * Safe to call if the timer is inactive.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    /* Use the processor the watchdog was inserted on. */
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
---|
1557 | |
---|
/**
 * @brief Removes the thread timer and unblocks the thread.
 *
 * On multiprocessing configurations remote threads (proxies) are unblocked
 * via the thread queue proxy operation instead of a direct unblock.
 *
 * @param[in] the_thread The thread to unblock.
 * @param[in] queue The thread queue used for the proxy unblock; unused on
 * non-multiprocessing configurations.
 */
RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
---|
1576 | |
---|
/**
 * @brief Records the real processor of a thread for debugging purposes.
 *
 * A no-op unless both RTEMS_SMP and RTEMS_DEBUG are enabled.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor the thread actually runs on.
 */
RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
  the_thread->Scheduler.debug_real_cpu = cpu;
#else
  (void) the_thread;
  (void) cpu;
#endif
}
---|
1589 | |
---|
1590 | /** @}*/ |
---|
1591 | |
---|
1592 | #ifdef __cplusplus |
---|
1593 | } |
---|
1594 | #endif |
---|
1595 | |
---|
1596 | #if defined(RTEMS_MULTIPROCESSING) |
---|
1597 | #include <rtems/score/threadmp.h> |
---|
1598 | #endif |
---|
1599 | |
---|
1600 | #endif |
---|
1601 | /* end of include file */ |
---|