source: rtems/cpukit/include/rtems/score/threadimpl.h @ 57be57c7

Last change on this file since 57be57c7 was 57be57c7, checked in by Sebastian Huber <sebastian.huber@…>, on May 14, 2021 at 7:31:47 AM

score: Return status in _Thread_Restart_other()

This simplifies rtems_task_restart().

1/**
2 * @file
3 *
4 * @ingroup RTEMSScoreThread
5 *
6 * @brief This header file provides interfaces of the
7 *   @ref RTEMSScoreThread which are only used by the implementation.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2017 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/timestampimpl.h>
35#include <rtems/score/threadqimpl.h>
36#include <rtems/score/todimpl.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44/**
45 * @addtogroup RTEMSScoreThread
46 *
47 * @{
48 */
49
50/**
51 *  Self for the GNU Ada Run-Time
52 */
53extern void *rtems_ada_self;
54
55/**
56 * @brief Object identifier of the global constructor thread.
57 *
58 * This variable is set by _RTEMS_tasks_Initialize_user_tasks_body() or
59 * _POSIX_Threads_Initialize_user_threads_body().
60 *
61 * It is consumed by _Thread_Handler().
62 */
63extern Objects_Id _Thread_Global_constructor;
64
65/**
66 *  The following points to the thread whose floating point
67 *  context is currently loaded.
68 */
69#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
70extern Thread_Control *_Thread_Allocated_fp;
71#endif
72
73#if defined(RTEMS_SMP)
74#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
75  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
76#endif
77
78typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
79
80/**
81 * @brief Calls the visitor with each thread and the given argument until
82 *      the visitor indicates that it is done.
83 *
84 * @param visitor Function that gets a thread and @a arg as parameters and
85 *      returns true if the iteration is done.
86 * @param arg Parameter for @a visitor
87 */
88void _Thread_Iterate(
89  Thread_Visitor  visitor,
90  void           *arg
91);
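/*
 * Illustrative sketch, not part of the original header: a minimal visitor
 * which counts all threads via _Thread_Iterate().  The _Example_* names are
 * hypothetical; returning false tells the iteration to continue.
 */
static bool _Example_Count_visitor( Thread_Control *the_thread, void *arg )
{
  size_t *count;

  (void) the_thread;
  count = arg;
  ++( *count );

  return false; /* not done, visit the next thread */
}

static size_t _Example_Count_threads( void )
{
  size_t count;

  count = 0;
  _Thread_Iterate( _Example_Count_visitor, &count );

  return count;
}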
92
93/**
94 * @brief Initializes the thread information
95 *
96 * @param[out] information Information to initialize.
97 */
98void _Thread_Initialize_information( Thread_Information *information );
99
100/**
101 * @brief Initializes thread handler.
102 *
103 * This routine performs the initialization necessary for this handler.
104 */
105void _Thread_Handler_initialization(void);
106
107/**
108 * @brief Creates idle thread.
109 *
110 * This routine creates the idle thread.
111 *
112 * @warning No thread should be created before this one.
113 */
114void _Thread_Create_idle(void);
115
116/**
117 * @brief Starts thread multitasking.
118 *
119 * This routine initiates multitasking.  It is invoked only as
120 * part of initialization and its invocation is the last act of
121 * the non-multitasking part of the system initialization.
122 */
123RTEMS_NO_RETURN void _Thread_Start_multitasking( void );
124
125/**
126 * @brief The configuration of a new thread to initialize.
127 */
128typedef struct {
129  /**
130   * @brief The scheduler control instance for the thread.
131   */
132  const struct _Scheduler_Control *scheduler;
133
134  /**
135   * @brief The starting address of the stack area.
136   */
137  void *stack_area;
138
139  /**
140   * @brief The size of the stack area in bytes.
141   */
142  size_t stack_size;
143
144  /**
145   * @brief This member contains the handler to free the stack.
146   *
147   * It shall not be NULL.  Use _Objects_Free_nothing() if there is nothing to free.
148   */
149  void ( *stack_free )( void * );
150
151  /**
152   * @brief The new thread's priority.
153   */
154  Priority_Control priority;
155
156  /**
157   * @brief The thread's budget algorithm.
158   */
159  Thread_CPU_budget_algorithms budget_algorithm;
160
161  /**
162   * @brief The thread's initial budget callout.
163   */
164  Thread_CPU_budget_algorithm_callout budget_callout;
165
166  /**
167   * @brief The thread's initial CPU time budget.
168   */
169  uint32_t cpu_time_budget;
170
171  /**
172   * @brief 32-bit unsigned integer name of the object for the thread.
173   */
174  uint32_t name;
175
176  /**
177   * @brief The thread's initial ISR level.
178   */
179  uint32_t isr_level;
180
181  /**
182   * @brief Indicates whether the thread needs a floating-point area.
183   */
184  bool is_fp;
185
186  /**
187   * @brief Indicates whether the new thread is preemptible.
188   */
189  bool is_preemptible;
190} Thread_Configuration;
191
192/**
193 * @brief Initializes thread.
194 *
195 * This routine initializes the specified thread.  It allocates
196 * all memory associated with this thread.  It completes by adding
197 * the thread to the local object table so operations on this
198 * thread id are allowed.
199 *
200 * @note If stack_area is NULL, it is allocated from the workspace.
201 *
202 * @note If the stack is allocated from the workspace, then it is
203 *       guaranteed to be of at least minimum size.
204 *
205 * @param information The thread information.
206 * @param the_thread The thread to initialize.
207 * @param config The configuration of the thread to initialize.
208 *
209 * @retval STATUS_SUCCESSFUL The thread initialization was successful.
210 *
211 * @retval STATUS_UNSATISFIED The thread initialization failed.
212 */
213Status_Control _Thread_Initialize(
214  Thread_Information         *information,
215  Thread_Control             *the_thread,
216  const Thread_Configuration *config
217);
218
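/*
 * Illustrative sketch, not part of the original header: filling in a
 * Thread_Configuration before calling _Thread_Initialize().  The static
 * stack buffer, the priority value, and the 32-bit object name are
 * placeholders, and _Example_Initialize() is a hypothetical helper.
 */
static char _Example_stack[ 4096 ];

static Status_Control _Example_Initialize(
  Thread_Information *information,
  Thread_Control     *the_thread
)
{
  Thread_Configuration config = { 0 };

  config.scheduler = &_Scheduler_Table[ 0 ];
  config.stack_area = _Example_stack;
  config.stack_size = sizeof( _Example_stack );
  config.stack_free = _Objects_Free_nothing; /* nothing to free, see above */
  config.priority = 1;
  config.budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
  config.name = 0x54455354; /* placeholder 32-bit name */
  config.is_preemptible = true;

  return _Thread_Initialize( information, the_thread, &config );
}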
219/**
220 * @brief Frees the thread.
221 *
222 * This routine invokes the thread delete extensions and frees all resources
223 * associated with the thread.  Afterwards the thread object is closed.
224 *
225 * @param[in, out] information is the thread information.
226 *
227 * @param[in, out] the_thread is the thread to free.
228 */
229void _Thread_Free(
230  Thread_Information *information,
231  Thread_Control     *the_thread
232);
233
234/**
235 * @brief Starts the specified thread.
236 *
237 * If the thread is not in the dormant state, the routine returns
238 * STATUS_INCORRECT_STATE and performs no actions except enabling interrupts
239 * as indicated by the ISR lock context.
240 *
241 * Otherwise, this routine initializes the executable information for the
242 * thread and makes it ready to execute.  After the call of this routine, the
243 * thread competes with all other ready threads for CPU time.
244 *
245 * Then the routine enables the local interrupts as indicated by the ISR lock
246 * context.
247 *
248 * Then the thread start user extensions are called with thread dispatching
249 * disabled and interrupts enabled after making the thread ready.  Please note
250 * that in SMP configurations, the thread switch and begin user extensions may
251 * be called in parallel on another processor.
252 *
253 * Then thread dispatching is enabled and other threads may execute before the
254 * routine returns.
255 *
256 * @param[in, out] the_thread is the thread to start.
257 *
258 * @param entry is the thread entry information.
259 *
260 * @param[in, out] lock_context is the ISR lock context which shall be used to disable the
261 *   local interrupts before the call of this routine.
262 *
263 * @retval STATUS_SUCCESSFUL The thread start was successful.
264 *
265 * @retval STATUS_INCORRECT_STATE The thread was already started.
266 */
267Status_Control _Thread_Start(
268  Thread_Control                 *the_thread,
269  const Thread_Entry_information *entry,
270  ISR_lock_Context               *lock_context
271);
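/*
 * Illustrative sketch, not part of the original header: starting a thread
 * according to the contract above.  Local interrupts are disabled through
 * the ISR lock context before the call; _Thread_Start() enables them again.
 * The _Example_* name is hypothetical.
 */
static bool _Example_Start(
  Thread_Control                 *the_thread,
  const Thread_Entry_information *entry
)
{
  ISR_lock_Context lock_context;
  Status_Control   status;

  _ISR_lock_ISR_disable( &lock_context );
  status = _Thread_Start( the_thread, entry, &lock_context );

  return status == STATUS_SUCCESSFUL;
}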
272
273/**
274 * @brief Restarts the currently executing thread.
275 *
276 * @param[in, out] executing The currently executing thread.
277 * @param entry The start entry information for @a executing.
278 * @param lock_context The lock context.
279 */
280RTEMS_NO_RETURN void _Thread_Restart_self(
281  Thread_Control                 *executing,
282  const Thread_Entry_information *entry,
283  ISR_lock_Context               *lock_context
284);
285
286/**
287 * @brief Restarts the thread.
288 *
289 * @param[in, out] the_thread is the thread to restart.
290 *
291 * @param entry is the new start entry information for the thread to restart.
292 *
293 * @param[in, out] lock_context is the lock context with interrupts disabled.
294 *
295 * @retval STATUS_SUCCESSFUL The operation was successful.
296 *
297 * @retval STATUS_INCORRECT_STATE The thread was dormant.
298 */
299Status_Control _Thread_Restart_other(
300  Thread_Control                 *the_thread,
301  const Thread_Entry_information *entry,
302  ISR_lock_Context               *lock_context
303);
304
305/**
306 * @brief Yields the currently executing thread.
307 *
308 * @param[in, out] executing The thread that performs a yield.
309 */
310void _Thread_Yield( Thread_Control *executing );
311
312/**
313 * @brief Changes the life state of the currently executing thread.
314 *
315 * @param clear Life states to clear.
316 * @param set Life states to set.
317 * @param ignore Life states to ignore.
318 *
319 * @return The previous life state of the thread.
320 */
321Thread_Life_state _Thread_Change_life(
322  Thread_Life_state clear,
323  Thread_Life_state set,
324  Thread_Life_state ignore
325);
326
327/**
328 * @brief Sets the life protection state of the executing thread.
329 *
330 * Calls _Thread_Change_life() with THREAD_LIFE_PROTECTED as the state to clear
331 * and the bitwise AND of the given state and THREAD_LIFE_PROTECTED to set.
332 *
333 * @param state The states to set.
334 *
335 * @return The previous life state of the thread.
336 */
337Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
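/*
 * Illustrative sketch, not part of the original header: temporarily
 * protecting the executing thread against asynchronous restart or
 * termination requests while it performs some work, then restoring the
 * previous protection.  The _Example_* name and the callback are
 * hypothetical.
 */
static void _Example_Do_protected_work( void ( *work )( void ) )
{
  Thread_Life_state previous;

  previous = _Thread_Set_life_protection( THREAD_LIFE_PROTECTED );
  ( *work )();
  _Thread_Set_life_protection( previous );
}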
338
339/**
340 * @brief Kills all zombie threads in the system.
341 *
342 * Threads change into the zombie state as the last step in the thread
343 * termination sequence right before a context switch to the heir thread is
344 * initiated.  Since the thread stack is still in use during this phase we have
345 * to postpone the thread stack reclamation until this point.  On SMP
346 * configurations we may have to busy wait for context switch completion here.
347 */
348void _Thread_Kill_zombies( void );
349
350/**
351 * @brief Exits the currently executing thread.
352 *
353 * @param[in, out] executing The currently executing thread.
354 * @param set The states to set.
355 * @param exit_value The exit value of the thread.
356 */
357void _Thread_Exit(
358  Thread_Control    *executing,
359  Thread_Life_state  set,
360  void              *exit_value
361);
362
363/**
364 * @brief Waits in the currently executing thread for the given thread to
365 *      terminate (join).
366 *
367 * @param[in, out] the_thread The thread to wait for.
368 * @param waiting_for_join The thread state used to block the executing thread during the join.
369 * @param[in, out] executing The currently executing thread.
370 * @param queue_context The thread queue context.
371 */
372void _Thread_Join(
373  Thread_Control       *the_thread,
374  States_Control        waiting_for_join,
375  Thread_Control       *executing,
376  Thread_queue_Context *queue_context
377);
378
379/**
380 * @brief Cancels the thread.
381 *
382 * @param[in, out] the_thread The thread to cancel.
383 * @param executing The currently executing thread.
384 * @param exit_value The exit value for the thread.
385 */
386void _Thread_Cancel(
387  Thread_Control *the_thread,
388  Thread_Control *executing,
389  void           *exit_value
390);
391
392typedef struct {
393  Thread_queue_Context  Base;
394  Thread_Control       *cancel;
395} Thread_Close_context;
396
397/**
398 * @brief Closes the thread.
399 *
400 * Closes the thread object and starts the thread termination sequence.  In
401 * case the executing thread is not the thread being terminated, this function
402 * waits until the terminating thread has reached the zombie state.
403 *
404 * @param the_thread The thread to close.
405 * @param executing The currently executing thread.
406 * @param[in, out] context The thread close context.
407 */
408void _Thread_Close(
409  Thread_Control       *the_thread,
410  Thread_Control       *executing,
411  Thread_Close_context *context
412);
413
414/**
415 * @brief Checks if the thread is ready.
416 *
417 * @param the_thread The thread to check if it is ready.
418 *
419 * @retval true The thread is currently in the ready state.
420 * @retval false The thread is currently not ready.
421 */
422RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
423{
424  return _States_Is_ready( the_thread->current_state );
425}
426
427/**
428 * @brief Clears the specified thread state without locking the lock context.
429 *
430 * In the case the previous state is a non-ready state and the next state is
431 * the ready state, then the thread is unblocked by the scheduler.
432 *
433 * @param[in, out] the_thread The thread.
434 * @param state The state to clear.  It must not be zero.
435 *
436 * @return The thread's previous state.
437 */
438States_Control _Thread_Clear_state_locked(
439  Thread_Control *the_thread,
440  States_Control  state
441);
442
443/**
444 * @brief Clears the specified thread state.
445 *
446 * In the case the previous state is a non-ready state and the next state is
447 * the ready state, then the thread is unblocked by the scheduler.
448 *
449 * @param[in, out] the_thread The thread.
450 * @param state The state to clear.  It must not be zero.
451 *
452 * @return The previous state.
453 */
454States_Control _Thread_Clear_state(
455  Thread_Control *the_thread,
456  States_Control  state
457);
458
459/**
460 * @brief Sets the specified thread state without locking the lock context.
461 *
462 * In the case the previous state is the ready state, then the thread is blocked
463 * by the scheduler.
464 *
465 * @param[in, out] the_thread The thread.
466 * @param state The state to set.  It must not be zero.
467 *
468 * @return The previous state.
469 */
470States_Control _Thread_Set_state_locked(
471  Thread_Control *the_thread,
472  States_Control  state
473);
474
475/**
476 * @brief Sets the specified thread state.
477 *
478 * In the case the previous state is the ready state, then the thread is blocked
479 * by the scheduler.
480 *
481 * @param[in, out] the_thread The thread.
482 * @param state The state to set.  It must not be zero.
483 *
484 * @return The previous state.
485 */
486States_Control _Thread_Set_state(
487  Thread_Control *the_thread,
488  States_Control  state
489);
490
491/**
492 * @brief Initializes the environment for a thread.
493 *
494 * This routine initializes the context of @a the_thread to its
495 * appropriate starting state.
496 *
497 * @param[in, out] the_thread The pointer to the thread control block.
498 */
499void _Thread_Load_environment(
500  Thread_Control *the_thread
501);
502
503/**
504 * @brief Calls the thread's entry of the idle start kind.
505 *
506 * @param executing The currently executing thread.
507 */
508void _Thread_Entry_adaptor_idle( Thread_Control *executing );
509
510/**
511 * @brief Calls the thread's entry of the numeric start kind.
512 *
513 * @param executing The currently executing thread.
514 */
515void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
516
517/**
518 * @brief Calls the thread's entry of the pointer start kind.
519 *
520 * Stores the return value in the Wait.return_argument of the thread.
521 *
522 * @param executing The currently executing thread.
523 */
524void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
525
526/**
527 * @brief Wrapper function for all threads.
528 *
529 * This routine is the wrapper function for all threads.  It is
530 * the starting point for all threads.  The user provided thread
531 * entry point is invoked by this routine.  Operations
532 * which must be performed immediately before and after the user's
533 * thread executes are found here.
534 *
535 * @note On entry, it is assumed all interrupts are blocked and that this
536 * routine needs to set the initial isr level.  This may or may not
537 * actually be needed by the context switch routine and as a result
538 * interrupts may already be at their proper level.  Either way,
539 * setting the initial isr level properly here is safe.
540 */
541void _Thread_Handler( void );
542
543/**
544 * @brief Acquires the lock context in a critical section.
545 *
546 * @param the_thread The thread to acquire the lock context.
547 * @param lock_context The lock context.
548 */
549RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
550  Thread_Control   *the_thread,
551  ISR_lock_Context *lock_context
552)
553{
554  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
555}
556
557/**
558 * @brief Disables interrupts and acquires the thread state lock.
559 *
560 * @param the_thread The thread to acquire the lock context.
561 * @param lock_context The lock context.
562 */
563RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
564  Thread_Control   *the_thread,
565  ISR_lock_Context *lock_context
566)
567{
568  _ISR_lock_ISR_disable( lock_context );
569  _Thread_State_acquire_critical( the_thread, lock_context );
570}
571
572/**
573 * @brief Disables interrupts and acquires the lock context for the currently
574 *      executing thread.
575 *
576 * @param lock_context The lock context.
577 *
578 * @return The currently executing thread.
579 */
580RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
581  ISR_lock_Context *lock_context
582)
583{
584  Thread_Control *executing;
585
586  _ISR_lock_ISR_disable( lock_context );
587  executing = _Thread_Executing;
588  _Thread_State_acquire_critical( executing, lock_context );
589
590  return executing;
591}
592
593/**
594 * @brief Releases the lock context in a critical section.
595 *
596 * @param the_thread The thread to release the lock context.
597 * @param lock_context The lock context.
598 */
599RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
600  Thread_Control   *the_thread,
601  ISR_lock_Context *lock_context
602)
603{
604  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
605}
606
607/**
608 * @brief Releases the lock context and enables interrupts.
609 *
610 * @param[in, out] the_thread The thread to release the lock context.
611 * @param[out] lock_context The lock context.
612 */
613RTEMS_INLINE_ROUTINE void _Thread_State_release(
614  Thread_Control   *the_thread,
615  ISR_lock_Context *lock_context
616)
617{
618  _Thread_State_release_critical( the_thread, lock_context );
619  _ISR_lock_ISR_enable( lock_context );
620}
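/*
 * Illustrative sketch, not part of the original header: reading the current
 * state of the executing thread under the thread state lock, using the
 * acquire/release pair above.  The _Example_* name is hypothetical.
 */
static States_Control _Example_Get_executing_state( void )
{
  Thread_Control   *executing;
  ISR_lock_Context  lock_context;
  States_Control    state;

  executing = _Thread_State_acquire_for_executing( &lock_context );
  state = executing->current_state;
  _Thread_State_release( executing, &lock_context );

  return state;
}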
621
622/**
623 * @brief Checks if the thread is owner of the lock of the join queue.
624 *
625 * @param the_thread The thread for the verification.
626 *
627 * @retval true The thread is owner of the lock of the join queue.
628 * @retval false The thread is not owner of the lock of the join queue.
629 */
630#if defined(RTEMS_DEBUG)
631RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
632  const Thread_Control *the_thread
633)
634{
635  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
636}
637#endif
638
639/**
640 * @brief Performs the priority actions specified by the thread queue context
641 * along the thread queue path.
642 *
643 * The caller must be the owner of the thread wait lock.
644 *
645 * @param start_of_path The start thread of the thread queue path.
646 * @param queue_context The thread queue context specifying the thread queue
647 *   path and initial thread priority actions.
648 *
649 * @see _Thread_queue_Path_acquire_critical().
650 */
651void _Thread_Priority_perform_actions(
652  Thread_Control       *start_of_path,
653  Thread_queue_Context *queue_context
654);
655
656/**
657 * @brief Adds the specified thread priority node to the corresponding thread
658 * priority aggregation.
659 *
660 * The caller must be the owner of the thread wait lock.
661 *
662 * @param the_thread The thread.
663 * @param priority_node The thread priority node to add.
664 * @param queue_context The thread queue context to return an updated set of
665 *   threads for _Thread_Priority_update().  The thread queue context must be
666 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
667 *   call of this function.
668 *
669 * @see _Thread_Wait_acquire().
670 */
671void _Thread_Priority_add(
672  Thread_Control       *the_thread,
673  Priority_Node        *priority_node,
674  Thread_queue_Context *queue_context
675);
676
677/**
678 * @brief Removes the specified thread priority node from the corresponding
679 * thread priority aggregation.
680 *
681 * The caller must be the owner of the thread wait lock.
682 *
683 * @param the_thread The thread.
684 * @param priority_node The thread priority node to remove.
685 * @param queue_context The thread queue context to return an updated set of
686 *   threads for _Thread_Priority_update().  The thread queue context must be
687 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
688 *   call of this function.
689 *
690 * @see _Thread_Wait_acquire().
691 */
692void _Thread_Priority_remove(
693  Thread_Control       *the_thread,
694  Priority_Node        *priority_node,
695  Thread_queue_Context *queue_context
696);
697
698/**
699 * @brief Propagates a thread priority value change in the specified thread
700 * priority node to the corresponding thread priority aggregation.
701 *
702 * The caller must be the owner of the thread wait lock.
703 *
704 * @param the_thread The thread.
705 * @param[out] priority_node The thread priority node to change.
706 * @param prepend_it In case this is true, then the thread is prepended to
707 *   its priority group in its home scheduler instance, otherwise it is
708 *   appended.
709 * @param queue_context The thread queue context to return an updated set of
710 *   threads for _Thread_Priority_update().  The thread queue context must be
711 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
712 *   call of this function.
713 *
714 * @see _Thread_Wait_acquire().
715 */
716void _Thread_Priority_changed(
717  Thread_Control       *the_thread,
718  Priority_Node        *priority_node,
719  bool                  prepend_it,
720  Thread_queue_Context *queue_context
721);
722
723/**
724 * @brief Changes the thread priority value of the specified thread priority
725 * node in the corresponding thread priority aggregation.
726 *
727 * The caller must be the owner of the thread wait lock.
728 *
729 * @param the_thread The thread.
730 * @param[out] priority_node The thread priority node to change.
731 * @param new_priority The new thread priority value of the thread priority
732 *   node to change.
733 * @param prepend_it In case this is true, then the thread is prepended to
734 *   its priority group in its home scheduler instance, otherwise it is
735 *   appended.
736 * @param queue_context The thread queue context to return an updated set of
737 *   threads for _Thread_Priority_update().  The thread queue context must be
738 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
739 *   call of this function.
740 *
741 * @see _Thread_Wait_acquire().
742 */
743RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
744  Thread_Control       *the_thread,
745  Priority_Node        *priority_node,
746  Priority_Control      new_priority,
747  bool                  prepend_it,
748  Thread_queue_Context *queue_context
749)
750{
751  _Priority_Node_set_priority( priority_node, new_priority );
752  _Thread_Priority_changed(
753    the_thread,
754    priority_node,
755    prepend_it,
756    queue_context
757  );
758}
759
760/**
761 * @brief Replaces the victim priority node with the replacement priority node
762 * in the corresponding thread priority aggregation.
763 *
764 * The caller must be the owner of the thread wait lock.
765 *
766 * @param the_thread The thread.
767 * @param victim_node The victim thread priority node.
768 * @param replacement_node The replacement thread priority node.
769 *
770 * @see _Thread_Wait_acquire().
771 */
772void _Thread_Priority_replace(
773  Thread_Control *the_thread,
774  Priority_Node  *victim_node,
775  Priority_Node  *replacement_node
776);
777
778/**
779 * @brief Updates the priority of all threads in the set
780 *
781 * @param queue_context The thread queue context to return an updated set of
782 *   threads for _Thread_Priority_update().  The thread queue context must be
783 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
784 *   call of this function.
785 *
786 * @see _Thread_Priority_add(), _Thread_Priority_change(),
787 *   _Thread_Priority_changed() and _Thread_Priority_remove().
788 */
789void _Thread_Priority_update( Thread_queue_Context *queue_context );
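/*
 * Illustrative sketch, not part of the original header: a typical sequence
 * to change the real priority of a thread and propagate the update, as
 * required by the documentation above.  The _Example_* name is hypothetical;
 * _Thread_Wait_release() and _Thread_queue_Context_clear_priority_updates()
 * are declared elsewhere in the thread and thread queue implementation.
 */
static void _Example_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  Thread_queue_Context queue_context;

  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );
  _Thread_Priority_change(
    the_thread,
    &the_thread->Real_priority,
    new_priority,
    false,
    &queue_context
  );
  _Thread_Wait_release( the_thread, &queue_context );
  _Thread_Priority_update( &queue_context );
}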
790
791/**
792 * @brief Updates the priority of the thread and changes its sticky level.
793 *
794 * @param the_thread The thread.
795 * @param sticky_level_change The amount by which to change the sticky level.
796 */
797#if defined(RTEMS_SMP)
798void _Thread_Priority_and_sticky_update(
799  Thread_Control *the_thread,
800  int             sticky_level_change
801);
802#endif
803
804/**
805 * @brief Checks if the left thread priority is less than the right thread
806 *      priority in the intuitive sense of priority.
807 *
808 * @param left The left thread priority.
809 * @param right The right thread priority.
810 *
811 * @retval true The left priority is less in the intuitive sense.
812 * @retval false The left priority is greater or equal in the intuitive sense.
813 */
814RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
815  Priority_Control left,
816  Priority_Control right
817)
818{
819  return left > right;
820}
821
822/**
823 * @brief Returns the highest priority of the left and right thread priorities
824 * in the intuitive sense of priority.
825 *
826 * @param left The left thread priority.
827 * @param right The right thread priority.
828 *
829 * @return The highest priority in the intuitive sense of priority.
830 */
831RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
832  Priority_Control left,
833  Priority_Control right
834)
835{
836  return _Thread_Priority_less_than( left, right ) ? right : left;
837}
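/*
 * Illustrative note, not part of the original header: priority values are
 * ordered so that numerically smaller values are more important.  For
 * example, with left == 1 and right == 2, _Thread_Priority_less_than()
 * returns false (priority 1 is not less important than priority 2) and
 * _Thread_Priority_highest() returns 1.
 */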
838
839/**
840 * @brief Gets the thread object information for the API of the object
841 *   identifier.
842 *
843 * @param id is an object identifier which defines the API to get the
844 *   associated thread objects information.
845 *
846 * @retval NULL The object identifier had an invalid API.
847 *
848 * @return Returns the thread object information associated with the API of the
849 *   object identifier.
850 */
851RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information_by_id(
852  Objects_Id id
853)
854{
855  uint32_t the_api;
856
857  the_api = _Objects_Get_API( id );
858
859  if ( !_Objects_Is_api_valid( the_api ) ) {
860    return NULL;
861  }
862
863  /*
864   * Threads are always first class :)
865   *
866   * There is no need to validate the object class of the object identifier,
867   * since this will be done by the object get methods.
868   */
869  return _Objects_Information_table[ the_api ][ 1 ];
870}
871
872/**
873 * @brief Gets the thread object information of the thread.
874 *
875 * @param the_thread is the thread to get the thread object information.
876 *
877 * @return Returns the thread object information of the thread.
878 */
879RTEMS_INLINE_ROUTINE Thread_Information *_Thread_Get_objects_information(
880  Thread_Control *the_thread
881)
882{
883  size_t              the_api;
884  Thread_Information *information;
885
886  the_api = (size_t) _Objects_Get_API( the_thread->Object.id );
887  _Assert( _Objects_Is_api_valid( the_api ) );
888
889  information = (Thread_Information *)
890    _Objects_Information_table[ the_api ][ 1 ];
891  _Assert( information != NULL );
892
893  return information;
894}
895
896/**
897 * @brief Gets a thread by its identifier.
898 *
899 * @see _Objects_Get().
900 *
901 * @param id The id of the thread.
902 * @param lock_context The lock context.
903 */
904Thread_Control *_Thread_Get(
905  Objects_Id         id,
906  ISR_lock_Context  *lock_context
907);
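/*
 * Illustrative sketch, not part of the original header: looking up a thread
 * by its identifier.  If the thread is found, interrupts are left disabled
 * and must be re-enabled by the caller, here via _ISR_lock_ISR_enable().
 * The _Example_* name is hypothetical.
 */
static bool _Example_Thread_exists( Objects_Id id )
{
  Thread_Control   *the_thread;
  ISR_lock_Context  lock_context;

  the_thread = _Thread_Get( id, &lock_context );

  if ( the_thread == NULL ) {
    return false;
  }

  _ISR_lock_ISR_enable( &lock_context );

  return true;
}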
908
909/**
910 * @brief Gets the identifier of the calling thread.
911 *
912 * @return Returns the identifier of the calling thread.
913 */
914Objects_Id _Thread_Self_id( void );
915
916/**
917 * @brief Gets the cpu of the thread's scheduler.
918 *
919 * @param thread The thread.
920 *
921 * @return The cpu of the thread's scheduler.
922 */
923RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
924  const Thread_Control *thread
925)
926{
927#if defined(RTEMS_SMP)
928  return thread->Scheduler.cpu;
929#else
930  (void) thread;
931
932  return _Per_CPU_Get();
933#endif
934}
935
936/**
937 * @brief Sets the cpu of the thread's scheduler.
938 *
939 * @param[out] thread The thread.
940 * @param cpu The cpu to set.
941 */
942RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
943  Thread_Control *thread,
944  Per_CPU_Control *cpu
945)
946{
947#if defined(RTEMS_SMP)
948  thread->Scheduler.cpu = cpu;
949#else
950  (void) thread;
951  (void) cpu;
952#endif
953}
954
955/**
956 * @brief Checks if the thread is the currently executing thread.
957 *
958 * This function returns true if the_thread is the currently executing
959 * thread, and false otherwise.
960 *
961 * @param the_thread The thread to verify if it is the currently executing thread.
962 *
963 * @retval true @a the_thread is the currently executing one.
964 * @retval false @a the_thread is not the currently executing one.
965 */
966RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
967  const Thread_Control *the_thread
968)
969{
970  return ( the_thread == _Thread_Executing );
971}
972
973#if defined(RTEMS_SMP)
974/**
975 * @brief Checks if the thread executes currently on some processor in the
976 * system.
977 *
978 * Do not confuse this with _Thread_Is_executing() which checks only the
979 * current processor.
980 *
981 * @param the_thread The thread for the verification.
982 *
983 * @retval true @a the_thread is the currently executing one.
984 * @retval false @a the_thread is not the currently executing one.
985 */
986RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
987  const Thread_Control *the_thread
988)
989{
990  return _CPU_Context_Get_is_executing( &the_thread->Registers );
991}
992#endif
993
994/**
995 * @brief Checks if the thread is the heir.
996 *
997 * This function returns true if the_thread is the heir
998 * thread, and false otherwise.
999 *
1000 * @param the_thread The thread for the verification.
1001 *
1002 * @retval true @a the_thread is the heir.
1003 * @retval false @a the_thread is not the heir.
1004 */
1005RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
1006  const Thread_Control *the_thread
1007)
1008{
1009  return ( the_thread == _Thread_Heir );
1010}
1011
1012/**
1013 * @brief Unblocks the thread.
1014 *
1015 * This routine clears any blocking state for the_thread.  It performs
1016 * any necessary scheduling operations including the selection of
1017 * a new heir thread.
1018 *
1019 * @param[in, out] the_thread The thread to unblock.
1020 */
1021RTEMS_INLINE_ROUTINE void _Thread_Unblock (
1022  Thread_Control *the_thread
1023)
1024{
1025  _Thread_Clear_state( the_thread, STATES_BLOCKED );
1026}
1027
1028/**
1029 * @brief Checks if the floating point context of the thread is currently
1030 *      loaded in the floating point unit.
1031 *
1032 * This function returns true if the floating point context of
1033 * the_thread is currently loaded in the floating point unit, and
1034 * false otherwise.
1035 *
1036 * @param the_thread The thread for the verification.
1037 *
1038 * @retval true The floating point context of @a the_thread is currently
1039 *      loaded in the floating point unit.
1040 * @retval false The floating point context of @a the_thread is currently not
1041 *      loaded in the floating point unit.
1042 */
1043#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1044RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
1045  const Thread_Control *the_thread
1046)
1047{
1048  return ( the_thread == _Thread_Allocated_fp );
1049}
1050#endif
1051
1052/*
1053 * If the CPU has hardware floating point, then we must address saving
1054 * and restoring it as part of the context switch.
1055 *
1056 * The second conditional compilation section selects the algorithm used
1057 * to context switch between floating point tasks.  The deferred algorithm
1058 * can be significantly better in a system with few floating point tasks
1059 * because it reduces the total number of save and restore FP context
1060 * operations.  However, this algorithm can not be used on all CPUs due
1061 * to unpredictable use of FP registers by some compilers for integer
1062 * operations.
1063 */
1064
1065/**
1066 * @brief Saves the executing thread's floating point area.
1067 *
1068 * @param executing The currently executing thread.
1069 */
1070RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
1071{
1072#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1073#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
1074  if ( executing->fp_context != NULL )
1075    _Context_Save_fp( &executing->fp_context );
1076#endif
1077#endif
1078}
1079
1080/**
1081 * @brief Restores the executing thread's floating point area.
1082 *
1083 * @param executing The currently executing thread.
1084 */
1085RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
1086{
1087#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1088#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
1089  if ( (executing->fp_context != NULL) &&
1090       !_Thread_Is_allocated_fp( executing ) ) {
1091    if ( _Thread_Allocated_fp != NULL )
1092      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
1093    _Context_Restore_fp( &executing->fp_context );
1094    _Thread_Allocated_fp = executing;
1095  }
1096#else
1097  if ( executing->fp_context != NULL )
1098    _Context_Restore_fp( &executing->fp_context );
1099#endif
1100#endif
1101}
1102
1103/**
1104 * @brief Deallocates the currently loaded floating point context.
1105 *
1106 * This routine is invoked when the currently loaded floating
1107 * point context is no longer associated with an active thread.
1108 */
1109#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1110RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
1111{
1112  _Thread_Allocated_fp = NULL;
1113}
1114#endif
1115
1116/**
1117 * @brief Checks if a thread dispatch is necessary.
1118 *
1119 * This function returns true if a thread dispatch is necessary, and false
1120 * otherwise.
1121 *
1122 * @retval true A thread dispatch is necessary.
1123 * @retval false A thread dispatch is not necessary.
1124 */
1125RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
1126{
1127  return ( _Thread_Dispatch_necessary );
1128}
1129
1130/**
1131 * @brief Gets the maximum number of internal threads.
1132 *
1133 * @return The maximum number of internal threads.
1134 */
1135RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
1136{
1137  /* Idle threads */
1138  uint32_t maximum_internal_threads =
1139    rtems_configuration_get_maximum_processors();
1140
1141  /* MPCI thread */
1142#if defined(RTEMS_MULTIPROCESSING)
1143  if ( _System_state_Is_multiprocessing ) {
1144    ++maximum_internal_threads;
1145  }
1146#endif
1147
1148  return maximum_internal_threads;
1149}
1150
1151/**
1152 * @brief Allocates an internal thread and returns it.
1153 *
1154 * @retval pointer Pointer to the allocated Thread_Control.
1155 * @retval NULL The operation failed.
1156 */
1157RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
1158{
1159  return (Thread_Control *)
1160    _Objects_Allocate_unprotected( &_Thread_Information.Objects );
1161}
1162
1163/**
1164 * @brief Gets the heir of the processor and makes it executing.
1165 *
1166 * Must be called with interrupts disabled.  The thread dispatch necessary
1167 * indicator is cleared as a side-effect.
1168 *
1169 * @param[in, out] cpu_self The processor to get the heir of.
1170 *
1171 * @return The heir thread.
1172 *
1173 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
1174 * _Thread_Dispatch_update_heir().
1175 */
1176RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
1177  Per_CPU_Control *cpu_self
1178)
1179{
1180  Thread_Control *heir;
1181
1182  heir = cpu_self->heir;
1183  cpu_self->dispatch_necessary = false;
1184  cpu_self->executing = heir;
1185
1186  return heir;
1187}
1188
1189/**
1190 * @brief Updates the CPU time used by the thread.
1191 *
1192 * @param[in, out] the_thread The thread to which the additional used CPU
1193 *      time is added.
1194 * @param cpu The cpu.
1195 */
1196RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
1197  Thread_Control  *the_thread,
1198  Per_CPU_Control *cpu
1199)
1200{
1201  Timestamp_Control last;
1202  Timestamp_Control ran;
1203
1204  last = cpu->cpu_usage_timestamp;
1205  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
1206  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
1207  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
1208}
1209
1210/**
1211 * @brief Updates the used CPU time of the current heir, sets the new heir, and requests a dispatch.
1212 *
1213 * @param[in, out] cpu_self The current processor.
1214 * @param[in, out] cpu_for_heir The processor to do a dispatch on.
1215 * @param heir The new heir for @a cpu_for_heir.
1216 */
1217#if defined( RTEMS_SMP )
1218RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
1219  Per_CPU_Control *cpu_self,
1220  Per_CPU_Control *cpu_for_heir,
1221  Thread_Control  *heir
1222)
1223{
1224  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );
1225
1226  cpu_for_heir->heir = heir;
1227
1228  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
1229}
1230#endif
1231
1232/**
1233 * @brief Gets the used cpu time of the thread and stores it in the given
1234 *      Timestamp_Control.
1235 *
1236 * @param the_thread The thread to get the used cpu time of.
1237 * @param[out] cpu_time_used Stores the used cpu time of @a the_thread.
1238 */
1239void _Thread_Get_CPU_time_used(
1240  Thread_Control    *the_thread,
1241  Timestamp_Control *cpu_time_used
1242);
1243
1244/**
1245 * @brief Initializes the control chain of the action control.
1246 *
1247 * @param[out] action_control The action control to initialize.
1248 */
1249RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
1250  Thread_Action_control *action_control
1251)
1252{
1253  _Chain_Initialize_empty( &action_control->Chain );
1254}
1255
1256/**
1257 * @brief Initializes the Thread action.
1258 *
1259 * @param[out] action The Thread_Action to initialize.
1260 */
1261RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
1262  Thread_Action *action
1263)
1264{
1265  _Chain_Set_off_chain( &action->Node );
1266}
1267
1268/**
1269 * @brief Adds the post switch action to the thread.
1270 *
1271 * The caller shall own the thread state lock.  A thread dispatch is
1272 * requested.
1273 *
1274 * @param[in, out] the_thread is the thread of the action.
1275 *
1276 * @param[in, out] action is the action to add.
1277 *
1278 * @param handler is the handler for the action.
1279 */
1280RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
1281  Thread_Control        *the_thread,
1282  Thread_Action         *action,
1283  Thread_Action_handler  handler
1284)
1285{
1286  Per_CPU_Control *cpu_of_thread;
1287
1288  _Assert( _Thread_State_is_owner( the_thread ) );
1289
1290  cpu_of_thread = _Thread_Get_CPU( the_thread );
1291
1292  action->handler = handler;
1293
1294  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );
1295
1296  _Chain_Append_if_is_off_chain_unprotected(
1297    &the_thread->Post_switch_actions.Chain,
1298    &action->Node
1299  );
1300}
1301
1302/**
1303 * @brief Appends the post switch action to the thread.
1304 *
1305 * The caller shall own the thread state lock.  The action shall be inactive.
1306 * The handler of the action shall be already set.  A thread dispatch is not
1307 * requested.
1308 *
1309 * @param[in, out] the_thread is the thread of the action.
1310 *
1311 * @param[in, out] action is the action to add.
1312 */
1313RTEMS_INLINE_ROUTINE void _Thread_Append_post_switch_action(
1314  Thread_Control *the_thread,
1315  Thread_Action  *action
1316)
1317{
1318  _Assert( _Thread_State_is_owner( the_thread ) );
1319  _Assert( action->handler != NULL );
1320
1321  _Chain_Append_unprotected(
1322    &the_thread->Post_switch_actions.Chain,
1323    &action->Node
1324  );
1325}
1326
1327/**
1328 * @brief Checks if the thread life state is restarting.
1329 *
1330 * @param life_state The thread life state for the verification.
1331 *
1332 * @retval true @a life_state is restarting.
1333 * @retval false @a life_state is not restarting.
1334 */
1335RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
1336  Thread_Life_state life_state
1337)
1338{
1339  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
1340}
1341
1342/**
1343 * @brief Checks if the thread life state is terminating.
1344 *
1345 * @param life_state The thread life state for the verification.
1346 *
1347 * @retval true @a life_state is terminating.
1348 * @retval false @a life_state is not terminating.
1349 */
1350RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
1351  Thread_Life_state life_state
1352)
1353{
1354  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
1355}
1356
1357/**
1358 * @brief Checks if the thread life state allows a life change.
1359 *
1360 * @param life_state The thread life state for the verification.
1361 *
1362 * @retval true @a life_state allows life change.
1363 * @retval false @a life_state does not allow life change.
1364 */
1365RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
1366  Thread_Life_state life_state
1367)
1368{
1369  return ( life_state
1370    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
1371}
1372
1373/**
1374 * @brief Checks if the thread life state is life changing.
1375 *
1376 * @param life_state The thread life state for the verification.
1377 *
1378 * @retval true @a life_state is life changing.
1379 * @retval false @a life_state is not life changing.
1380 */
1381RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
1382  Thread_Life_state life_state
1383)
1384{
1385  return ( life_state
1386    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
1387}
1388
1389/**
1390 * @brief Checks if the thread is joinable.
1391 *
1392 * @param the_thread The thread for the verification.
1393 *
1394 * @retval true @a the_thread is joinable.
1395 * @retval false @a the_thread is not joinable.
1396 */
1397RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
1398  const Thread_Control *the_thread
1399)
1400{
1401  _Assert( _Thread_State_is_owner( the_thread ) );
1402  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
1403}
1404
1405/**
1406 * @brief Increments the thread's resource count.
1407 *
1408 * @param[in, out] the_thread The thread to increase the resource count of.
1409 */
1410RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
1411  Thread_Control *the_thread
1412)
1413{
1414#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1415  ++the_thread->resource_count;
1416#else
1417  (void) the_thread;
1418#endif
1419}
1420
1421/**
1422 * @brief Decrements the thread's resource count.
1423 *
1424 * @param[in, out] the_thread The thread to decrement the resource count of.
1425 */
1426RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
1427  Thread_Control *the_thread
1428)
1429{
1430#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1431  --the_thread->resource_count;
1432#else
1433  (void) the_thread;
1434#endif
1435}
1436
1437#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1438/**
1439 * @brief Checks if the thread owns resources.
1440 *
1441 * Resources are accounted with the Thread_Control::resource_count resource
1442 * counter.  This counter is used by mutex objects for example.
1443 *
1444 * @param the_thread The thread.
1445 *
1446 * @retval true The thread owns resources.
1447 * @retval false The thread does not own resources.
1448 */
1449RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
1450  const Thread_Control *the_thread
1451)
1452{
1453  return the_thread->resource_count != 0;
1454}
1455#endif
1456
1457#if defined(RTEMS_SMP)
1458/**
1459 * @brief Cancels the thread's need for help.
1460 *
1461 * @param the_thread The thread to cancel the help request of.
1462 * @param cpu The processor whose per-CPU lock protects the help request to
1463 *      cancel.
1464 */
1465RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
1466  Thread_Control  *the_thread,
1467  Per_CPU_Control *cpu
1468)
1469{
1470  ISR_lock_Context lock_context;
1471
1472  _Per_CPU_Acquire( cpu, &lock_context );
1473
1474  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
1475    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
1476    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
1477  }
1478
1479  _Per_CPU_Release( cpu, &lock_context );
1480}
1481#endif
1482
1483/**
1484 * @brief Gets the home scheduler of the thread.
1485 *
1486 * @param the_thread The thread to get the home scheduler of.
1487 *
1488 * @return The thread's home scheduler.
1489 */
1490RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
1491  const Thread_Control *the_thread
1492)
1493{
1494#if defined(RTEMS_SMP)
1495  return the_thread->Scheduler.home_scheduler;
1496#else
1497  (void) the_thread;
1498  return &_Scheduler_Table[ 0 ];
1499#endif
1500}
1501
1502/**
1503 * @brief Gets the home scheduler node of the thread.
1504 *
1505 * @param the_thread The thread to get the home scheduler node of.
1506 *
1507 * @return The thread's home scheduler node.
1508 */
1509RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
1510  const Thread_Control *the_thread
1511)
1512{
1513#if defined(RTEMS_SMP)
1514  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
1515  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
1516    _Chain_First( &the_thread->Scheduler.Wait_nodes )
1517  );
1518#else
1519  return the_thread->Scheduler.nodes;
1520#endif
1521}
1522
1523/**
1524 * @brief Gets the thread's scheduler node by index.
1525 *
1526 * @param the_thread The thread of which to get a scheduler node.
1527 * @param scheduler_index The index of the desired scheduler node.
1528 *
1529 * @return The scheduler node with the specified index.
1530 */
1531RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
1532  const Thread_Control *the_thread,
1533  size_t                scheduler_index
1534)
1535{
1536#if defined(RTEMS_SMP)
1537  return (Scheduler_Node *)
1538    ( (uintptr_t) the_thread->Scheduler.nodes
1539      + scheduler_index * _Scheduler_Node_size );
1540#else
1541  _Assert( scheduler_index == 0 );
1542  (void) scheduler_index;
1543  return the_thread->Scheduler.nodes;
1544#endif
1545}
1546
1547#if defined(RTEMS_SMP)
1548/**
1549 * @brief Acquires the lock context in a critical section.
1550 *
1551 * @param the_thread The thread to acquire the lock context.
1552 * @param lock_context The lock context.
1553 */
1554RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
1555  Thread_Control   *the_thread,
1556  ISR_lock_Context *lock_context
1557)
1558{
1559  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
1560}
1561
1562/**
1563 * @brief Releases the lock context in a critical section.
1564 *
1565 * @param the_thread The thread to release the lock context.
1566 * @param lock_context The lock context.
1567 */
1568RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
1569  Thread_Control   *the_thread,
1570  ISR_lock_Context *lock_context
1571)
1572{
1573  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
1574}
1575
1576/**
1577 * @brief Processes the thread's scheduler requests.
1578 *
1579 * @param[in, out] the_thread The thread for the operation.
1580 */
1581void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1582
1583/**
1584 * @brief Adds a scheduler request to the thread.
1585 *
1586 * @param[in, out] the_thread The thread to add a scheduler request to.
1587 * @param[in, out] scheduler_node The scheduler node for the request.
1588 * @param request The request to add.
1589 */
1590RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
1591  Thread_Control         *the_thread,
1592  Scheduler_Node         *scheduler_node,
1593  Scheduler_Node_request  request
1594)
1595{
1596  ISR_lock_Context       lock_context;
1597  Scheduler_Node_request current_request;
1598
1599  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );
1600
1601  current_request = scheduler_node->Thread.request;
1602
1603  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
1604    _Assert(
1605      request == SCHEDULER_NODE_REQUEST_ADD
1606        || request == SCHEDULER_NODE_REQUEST_REMOVE
1607    );
1608    _Assert( scheduler_node->Thread.next_request == NULL );
1609    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
1610    the_thread->Scheduler.requests = scheduler_node;
1611  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
1612    _Assert(
1613      ( current_request == SCHEDULER_NODE_REQUEST_ADD
1614        && request == SCHEDULER_NODE_REQUEST_REMOVE )
1615      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
1616        && request == SCHEDULER_NODE_REQUEST_ADD )
1617    );
1618    request = SCHEDULER_NODE_REQUEST_NOTHING;
1619  }
1620
1621  scheduler_node->Thread.request = request;
1622
1623  _Thread_Scheduler_release_critical( the_thread, &lock_context );
1624}
1625
1626/**
1627 * @brief Adds a wait node to the thread and adds a corresponding
1628 *      request to the thread.
1629 *
1630 * @param[in, out] the_thread The thread to add the wait node to.
1631 * @param scheduler_node The scheduler node which provides the wait node.
1632 */
1633RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
1634  Thread_Control *the_thread,
1635  Scheduler_Node *scheduler_node
1636)
1637{
1638  _Chain_Append_unprotected(
1639    &the_thread->Scheduler.Wait_nodes,
1640    &scheduler_node->Thread.Wait_node
1641  );
1642  _Thread_Scheduler_add_request(
1643    the_thread,
1644    scheduler_node,
1645    SCHEDULER_NODE_REQUEST_ADD
1646  );
1647}
1648
1649/**
1650 * @brief Removes a wait node from the thread and adds a corresponding
1651 *      removal request to it.
1652 *
1653 * @param the_thread The thread to remove the wait node from.
1654 * @param scheduler_node The scheduler node which provides the wait node.
1655 */
1656RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
1657  Thread_Control *the_thread,
1658  Scheduler_Node *scheduler_node
1659)
1660{
1661  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
1662  _Thread_Scheduler_add_request(
1663    the_thread,
1664    scheduler_node,
1665    SCHEDULER_NODE_REQUEST_REMOVE
1666  );
1667}
1668#endif
1669
1670/**
1671 * @brief Returns the priority of the thread.
1672 *
1673 * Returns the user API and thread wait information relevant thread priority.
1674 * This includes temporary thread priority adjustments due to locking
1675 * protocols, a job release or the POSIX sporadic server for example.
1676 *
1677 * @param the_thread The thread of which to get the priority.
1678 *
1679 * @return The priority of the thread.
1680 */
1681RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1682  const Thread_Control *the_thread
1683)
1684{
1685  Scheduler_Node *scheduler_node;
1686
1687  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1688  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1689}
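/*
 * Illustrative sketch, not part of the original header: checking whether the
 * first thread is currently more important than the second one using the
 * priority helpers above.  Locking considerations are omitted in this
 * sketch; the _Example_* name is hypothetical.
 */
static bool _Example_Is_more_important(
  const Thread_Control *left,
  const Thread_Control *right
)
{
  return _Thread_Priority_less_than(
    _Thread_Get_priority( right ),
    _Thread_Get_priority( left )
  );
}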
1690
1691/**
1692 * @brief Returns the unmapped priority of the thread.
1693 *
1694 * @param the_thread The thread of which to get the unmapped priority.
1695 *
1696 * @return The unmapped priority of the thread.
1697 */
1698RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_unmapped_priority(
1699  const Thread_Control *the_thread
1700)
1701{
1702  return SCHEDULER_PRIORITY_UNMAP( _Thread_Get_priority( the_thread ) );
1703}
1704
1705/**
1706 * @brief Returns the unmapped real priority of the thread.
1707 *
1708 * @param the_thread The thread of which to get the unmapped real priority.
1709 *
1710 * @return The unmapped real priority of the thread.
1711 */
1712RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_unmapped_real_priority(
1713  const Thread_Control *the_thread
1714)
1715{
1716  return SCHEDULER_PRIORITY_UNMAP( the_thread->Real_priority.priority );
1717}
1718
1719/**
1720 * @brief Acquires the thread wait default lock inside a critical section
1721 * (interrupts disabled).
1722 *
1723 * @param[in, out] the_thread The thread.
1724 * @param lock_context The lock context used for the corresponding lock
1725 *   release.
1726 *
1727 * @see _Thread_Wait_release_default_critical().
1728 */
1729RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
1730  Thread_Control   *the_thread,
1731  ISR_lock_Context *lock_context
1732)
1733{
1734  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
1735}
1736
1737/**
1738 * @brief Acquires the thread wait default lock and returns the executing
1739 * thread.
1740 *
1741 * @param lock_context The lock context used for the corresponding lock
1742 *   release.
1743 *
1744 * @return The executing thread.
1745 *
1746 * @see _Thread_Wait_release_default().
1747 */
1748RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
1749  ISR_lock_Context *lock_context
1750)
1751{
1752  Thread_Control *executing;
1753
1754  _ISR_lock_ISR_disable( lock_context );
1755  executing = _Thread_Executing;
1756  _Thread_Wait_acquire_default_critical( executing, lock_context );
1757
1758  return executing;
1759}
1760
1761/**
1762 * @brief Acquires the thread wait default lock and disables interrupts.
1763 *
1764 * @param[in, out] the_thread The thread.
1765 * @param[out] lock_context The lock context used for the corresponding lock
1766 *   release.
1767 *
1768 * @see _Thread_Wait_release_default().
1769 */
1770RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
1771  Thread_Control   *the_thread,
1772  ISR_lock_Context *lock_context
1773)
1774{
1775  _ISR_lock_ISR_disable( lock_context );
1776  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
1777}
1778
1779/**
1780 * @brief Releases the thread wait default lock inside a critical section
1781 * (interrupts disabled).
1782 *
1783 * The previous interrupt status is not restored.
1784 *
1785 * @param[in, out] the_thread The thread.
1786 * @param lock_context The lock context used for the corresponding lock
1787 *   acquire.
1788 */
1789RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
1790  Thread_Control   *the_thread,
1791  ISR_lock_Context *lock_context
1792)
1793{
1794  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1795}
1796
1797/**
1798 * @brief Releases the thread wait default lock and restores the previous
1799 * interrupt status.
1800 *
1801 * @param[in, out] the_thread The thread.
1802 * @param[out] lock_context The lock context used for the corresponding lock
1803 *   acquire.
1804 */
1805RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
1806  Thread_Control   *the_thread,
1807  ISR_lock_Context *lock_context
1808)
1809{
1810  _Thread_Wait_release_default_critical( the_thread, lock_context );
1811  _ISR_lock_ISR_enable( lock_context );
1812}
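/*
 * Illustrative sketch, not part of the original header: a critical section
 * protected by the thread wait default lock, using the acquire/release pair
 * above.  The _Example_* name and the callback are hypothetical.
 */
static void _Example_With_wait_default_lock(
  Thread_Control *the_thread,
  void         ( *critical_section )( Thread_Control * )
)
{
  ISR_lock_Context lock_context;

  _Thread_Wait_acquire_default( the_thread, &lock_context );
  ( *critical_section )( the_thread );
  _Thread_Wait_release_default( the_thread, &lock_context );
}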
1813
1814#if defined(RTEMS_SMP)
1815#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1816  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1817
1818/**
1819 * @brief Removes the wait lock request and opens the next pending gate, if any.
1820 *
1821 * @param the_thread The thread to remove the request from.
1822 * @param queue_lock_context The queue lock context.
1823 */
1824RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1825  Thread_Control            *the_thread,
1826  Thread_queue_Lock_context *queue_lock_context
1827)
1828{
1829  Chain_Node *first;
1830
1831  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1832  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1833
1834  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1835    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1836  }
1837}
1838
1839/**
1840 * @brief Acquires the lock of the wait queue inside a critical section.
1841 *
1842 * @param queue The wait queue to acquire.
1843 * @param queue_lock_context The queue lock context.
1844 */
1845RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
1846  Thread_queue_Queue        *queue,
1847  Thread_queue_Lock_context *queue_lock_context
1848)
1849{
1850  _Thread_queue_Queue_acquire_critical(
1851    queue,
1852    &_Thread_Executing->Potpourri_stats,
1853    &queue_lock_context->Lock_context
1854  );
1855}
1856
1857/**
1858 * @brief Releases the lock of the wait queue inside a critical section.
1859 *
1860 * @param queue The wait queue to release.
1861 * @param queue_lock_context The queue lock context.
1862 */
1863RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
1864  Thread_queue_Queue        *queue,
1865  Thread_queue_Lock_context *queue_lock_context
1866)
1867{
1868  _Thread_queue_Queue_release_critical(
1869    queue,
1870    &queue_lock_context->Lock_context
1871  );
1872}
1873#endif
1874
1875/**
1876 * @brief Acquires the thread wait lock inside a critical section (interrupts
1877 * disabled).
1878 *
1879 * @param[in, out] the_thread The thread.
1880 * @param[in, out] queue_context The thread queue context for the corresponding
1881 *   _Thread_Wait_release_critical().
1882 */
1883RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
1884  Thread_Control       *the_thread,
1885  Thread_queue_Context *queue_context
1886)
1887{
1888#if defined(RTEMS_SMP)
1889  Thread_queue_Queue *queue;
1890
1891  _Thread_Wait_acquire_default_critical(
1892    the_thread,
1893    &queue_context->Lock_context.Lock_context
1894  );
1895
1896  queue = the_thread->Wait.queue;
1897  queue_context->Lock_context.Wait.queue = queue;
1898
1899  if ( queue != NULL ) {
1900    _Thread_queue_Gate_add(
1901      &the_thread->Wait.Lock.Pending_requests,
1902      &queue_context->Lock_context.Wait.Gate
1903    );
1904    _Thread_Wait_release_default_critical(
1905      the_thread,
1906      &queue_context->Lock_context.Lock_context
1907    );
1908    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1909
1910    if ( queue_context->Lock_context.Wait.queue == NULL ) {
1911      _Thread_Wait_release_queue_critical(
1912        queue,
1913        &queue_context->Lock_context
1914      );
1915      _Thread_Wait_acquire_default_critical(
1916        the_thread,
1917        &queue_context->Lock_context.Lock_context
1918      );
1919      _Thread_Wait_remove_request_locked(
1920        the_thread,
1921        &queue_context->Lock_context
1922      );
1923      _Assert( the_thread->Wait.queue == NULL );
1924    }
1925  }
1926#else
1927  (void) the_thread;
1928  (void) queue_context;
1929#endif
1930}
1931
1932/**
1933 * @brief Acquires the thread wait lock and disables interrupts.
1934 *
1935 * @param[in, out] the_thread The thread.
1936 * @param[in, out] queue_context The thread queue context for the corresponding
1937 *   _Thread_Wait_release().
1938 */
1939RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
1940  Thread_Control       *the_thread,
1941  Thread_queue_Context *queue_context
1942)
1943{
1944  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
1945  _Thread_Wait_acquire_critical( the_thread, queue_context );
1946}
1947
1948/**
1949 * @brief Releases the thread wait lock inside a critical section (interrupts
1950 * disabled).
1951 *
1952 * The previous interrupt status is not restored.
1953 *
1954 * @param[in, out] the_thread The thread.
1955 * @param[in, out] queue_context The thread queue context used for the
1956 *   corresponding _Thread_Wait_acquire_critical().
1957 */
1958RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
1959  Thread_Control       *the_thread,
1960  Thread_queue_Context *queue_context
1961)
1962{
1963#if defined(RTEMS_SMP)
1964  Thread_queue_Queue *queue;
1965
1966  queue = queue_context->Lock_context.Wait.queue;
1967
1968  if ( queue != NULL ) {
1969    _Thread_Wait_release_queue_critical(
1970      queue, &queue_context->Lock_context
1971    );
1972    _Thread_Wait_acquire_default_critical(
1973      the_thread,
1974      &queue_context->Lock_context.Lock_context
1975    );
1976    _Thread_Wait_remove_request_locked(
1977      the_thread,
1978      &queue_context->Lock_context
1979    );
1980  }
1981
1982  _Thread_Wait_release_default_critical(
1983    the_thread,
1984    &queue_context->Lock_context.Lock_context
1985  );
1986#else
1987  (void) the_thread;
1988  (void) queue_context;
1989#endif
1990}
1991
1992/**
1993 * @brief Releases the thread wait lock and restores the previous interrupt
1994 * status.
1995 *
1996 * @param[in, out] the_thread The thread.
1997 * @param[in, out] queue_context The thread queue context used for the
1998 *   corresponding _Thread_Wait_acquire().
1999 */
2000RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
2001  Thread_Control       *the_thread,
2002  Thread_queue_Context *queue_context
2003)
2004{
2005  _Thread_Wait_release_critical( the_thread, queue_context );
2006  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
2007}
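
/*
 * Illustrative usage sketch (not part of the RTEMS API; the helper name is
 * hypothetical): _Thread_Wait_acquire() and _Thread_Wait_release() bracket
 * accesses which must see a consistent thread wait queue.  On SMP
 * configurations this acquires the default wait lock and, if the thread
 * currently waits on a thread queue, also the corresponding queue lock.
 */
RTEMS_INLINE_ROUTINE Thread_queue_Queue *_Example_Get_current_wait_queue(
  Thread_Control *the_thread
)
{
  Thread_queue_Context  queue_context;
  Thread_queue_Queue   *queue;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );

  /* Stable while the thread wait lock is held */
  queue = the_thread->Wait.queue;

  _Thread_Wait_release( the_thread, &queue_context );

  return queue;
}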
2008
2009/**
2010 * @brief Claims the thread wait queue.
2011 *
2012 * The caller must not be the owner of the default thread wait lock.  The
2013 * caller must be the owner of the corresponding thread queue lock.  The
2014 * registration of the corresponding thread queue operations is deferred and
2015 * done after the deadlock detection.  This is crucial to support timeouts on
2016 * SMP configurations.
2017 *
2018 * @param[in, out] the_thread The thread.
2019 * @param[in, out] queue The new thread queue.
2020 *
2021 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
2022 */
2023RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
2024  Thread_Control     *the_thread,
2025  Thread_queue_Queue *queue
2026)
2027{
2028  ISR_lock_Context lock_context;
2029
2030  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
2031
2032  _Assert( the_thread->Wait.queue == NULL );
2033
2034#if defined(RTEMS_SMP)
2035  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
2036  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
2037  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
2038#endif
2039
2040  the_thread->Wait.queue = queue;
2041
2042  _Thread_Wait_release_default_critical( the_thread, &lock_context );
2043}
2044
2045/**
2046 * @brief Finalizes the thread wait queue claim via registration of the
2047 * corresponding thread queue operations.
2048 *
2049 * @param[in, out] the_thread The thread.
2050 * @param operations The corresponding thread queue operations.
2051 */
2052RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
2053  Thread_Control                *the_thread,
2054  const Thread_queue_Operations *operations
2055)
2056{
2057  the_thread->Wait.operations = operations;
2058}
2059
2060/**
2061 * @brief Removes a thread wait lock request.
2062 *
2063 * On SMP configurations, removes a thread wait lock request.
2064 *
2065 * On other configurations, this function does nothing.
2066 *
2067 * @param[in, out] the_thread The thread.
2068 * @param[in, out] queue_lock_context The thread queue lock context used for
2069 *   the corresponding _Thread_Wait_acquire().
2070 */
2071RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
2072  Thread_Control            *the_thread,
2073  Thread_queue_Lock_context *queue_lock_context
2074)
2075{
2076#if defined(RTEMS_SMP)
2077  ISR_lock_Context lock_context;
2078
2079  _Thread_Wait_acquire_default( the_thread, &lock_context );
2080  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
2081  _Thread_Wait_release_default( the_thread, &lock_context );
2082#else
2083  (void) the_thread;
2084  (void) queue_lock_context;
2085#endif
2086}
2087
2088/**
2089 * @brief Restores the default thread wait queue and operations.
2090 *
2091 * The caller must be the owner of the current thread wait queue lock.
2092 *
2093 * On SMP configurations, the pending requests are updated to use the stale
2094 * thread queue operations.
2095 *
2096 * @param[in, out] the_thread The thread.
2097 *
2098 * @see _Thread_Wait_claim().
2099 */
2100RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
2101  Thread_Control *the_thread
2102)
2103{
2104#if defined(RTEMS_SMP)
2105  ISR_lock_Context  lock_context;
2106  Chain_Node       *node;
2107  const Chain_Node *tail;
2108
2109  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
2110
2111  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
2112  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
2113
2114  if ( node != tail ) {
2115    do {
2116      Thread_queue_Context *queue_context;
2117
2118      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
2119      queue_context->Lock_context.Wait.queue = NULL;
2120
2121      node = _Chain_Next( node );
2122    } while ( node != tail );
2123
2124    _Thread_queue_Gate_add(
2125      &the_thread->Wait.Lock.Pending_requests,
2126      &the_thread->Wait.Lock.Tranquilizer
2127    );
2128  } else {
2129    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
2130  }
2131#endif
2132
2133  the_thread->Wait.queue = NULL;
2134  the_thread->Wait.operations = &_Thread_queue_Operations_default;
2135
2136#if defined(RTEMS_SMP)
2137  _Thread_Wait_release_default_critical( the_thread, &lock_context );
2138#endif
2139}
2140
2141/**
2142 * @brief Tranquilizes the thread after a wait on a thread queue.
2143 *
2144 * After the violent blocking procedure this function makes the thread calm and
2145 * peaceful again so that it can carry out its normal work.
2146 *
2147 * On SMP configurations, ensures that all pending thread wait lock requests
2148 * have completed before the thread can begin a new thread wait procedure.
2149 *
2150 * On other configurations, this function does nothing.
2151 *
2152 * It must be called exactly once after a _Thread_Wait_claim()
2153 *  - after the corresponding thread queue lock has been released, and
2154 *  - after the default wait state has been restored or some other processor
2155 *    is about to do this.
2156 *
2157 * @param the_thread The thread.
2158 */
2159RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
2160  Thread_Control *the_thread
2161)
2162{
2163#if defined(RTEMS_SMP)
2164  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
2165#else
2166  (void) the_thread;
2167#endif
2168}
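
/*
 * Illustrative ordering sketch (not part of the RTEMS API; the helper name is
 * hypothetical): an outline of how a blocking path is expected to combine
 * _Thread_Wait_claim(), _Thread_Wait_claim_finalize(), and
 * _Thread_Wait_tranquilize().  The elided steps are indicated by comments.
 */
RTEMS_INLINE_ROUTINE void _Example_Blocking_path_outline(
  Thread_Control                *executing,
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations
)
{
  /* Precondition: the caller owns the thread queue lock, but not the default
     wait lock of the executing thread */
  _Thread_Wait_claim( executing, queue );

  /* ... deadlock detection runs here ... */

  _Thread_Wait_claim_finalize( executing, operations );

  /* ... enqueue on the thread queue, release the thread queue lock, block;
     the surrender or timeout path calls _Thread_Wait_restore_default() ... */

  /* Once the thread runs again, wait until all pending wait lock requests
     have completed before a new wait procedure may begin */
  _Thread_Wait_tranquilize( executing );
}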
2169
2170/**
2171 * @brief Cancels a thread wait on a thread queue.
2172 *
2173 * @param[in, out] the_thread The thread.
2174 * @param queue_context The thread queue context used for the corresponding
2175 *   _Thread_Wait_acquire().
2176 */
2177RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
2178  Thread_Control       *the_thread,
2179  Thread_queue_Context *queue_context
2180)
2181{
2182  Thread_queue_Queue *queue;
2183
2184  queue = the_thread->Wait.queue;
2185
2186#if defined(RTEMS_SMP)
2187  if ( queue != NULL ) {
2188    _Assert( queue_context->Lock_context.Wait.queue == queue );
2189#endif
2190
2191    ( *the_thread->Wait.operations->extract )(
2192      queue,
2193      the_thread,
2194      queue_context
2195    );
2196    _Thread_Wait_restore_default( the_thread );
2197
2198#if defined(RTEMS_SMP)
2199    _Assert( queue_context->Lock_context.Wait.queue == NULL );
2200    queue_context->Lock_context.Wait.queue = queue;
2201  }
2202#endif
2203}
2204
2205/**
2206 * @brief The initial thread wait flags value set by _Thread_Initialize().
2207 */
2208#define THREAD_WAIT_FLAGS_INITIAL 0x0U
2209
2210/**
2211 * @brief Mask to get the thread wait state flags.
2212 */
2213#define THREAD_WAIT_STATE_MASK 0xffU
2214
2215/**
2216 * @brief Indicates that the thread begins with the blocking operation.
2217 *
2218 * A blocking operation consists of an optional watchdog initialization and the
2219 * setting of the appropriate thread blocking state with the corresponding
2220 * scheduler block operation.
2221 */
2222#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
2223
2224/**
2225 * @brief Indicates that the thread completed the blocking operation.
2226 */
2227#define THREAD_WAIT_STATE_BLOCKED 0x2U
2228
2229/**
2230 * @brief Indicates that a condition to end the thread wait occurred.
2231 *
2232 * This could be a timeout, a signal, an event or a resource availability.
2233 */
2234#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
2235
2236/**
2237 * @brief Mask to get the thread wait class flags.
2238 */
2239#define THREAD_WAIT_CLASS_MASK 0xff00U
2240
2241/**
2242 * @brief Indicates that the thread waits for an event.
2243 */
2244#define THREAD_WAIT_CLASS_EVENT 0x100U
2245
2246/**
2247 * @brief Indicates that the thread waits for a system event.
2248 */
2249#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
2250
2251/**
2252 * @brief Indicates that the thread waits for an object.
2253 */
2254#define THREAD_WAIT_CLASS_OBJECT 0x400U
2255
2256/**
2257 * @brief Indicates that the thread waits for a period.
2258 */
2259#define THREAD_WAIT_CLASS_PERIOD 0x800U
2260
2261/**
2262 * @brief Sets the thread's wait flags.
2263 *
2264 * @param[in, out] the_thread The thread to set the wait flags of.
2265 * @param flags The flags to set.
2266 */
2267RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
2268  Thread_Control    *the_thread,
2269  Thread_Wait_flags  flags
2270)
2271{
2272#if defined(RTEMS_SMP)
2273  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
2274#else
2275  the_thread->Wait.flags = flags;
2276#endif
2277}
2278
2279/**
2280 * @brief Gets the thread's wait flags with ATOMIC_ORDER_RELAXED semantics.
2281 *
2282 * @param the_thread The thread to get the wait flags of.
2283 *
2284 * @return The thread's wait flags.
2285 */
2286RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
2287  const Thread_Control *the_thread
2288)
2289{
2290#if defined(RTEMS_SMP)
2291  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
2292#else
2293  return the_thread->Wait.flags;
2294#endif
2295}
2296
2297/**
2298 * @brief Gets the thread's wait flags with ATOMIC_ORDER_ACQUIRE semantics.
2299 *
2300 * @param the_thread The thread to get the wait flags of.
2301 *
2302 * @return The thread's wait flags.
2303 */
2304RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
2305  const Thread_Control *the_thread
2306)
2307{
2308#if defined(RTEMS_SMP)
2309  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
2310#else
2311  return the_thread->Wait.flags;
2312#endif
2313}
2314
2315/**
2316 * @brief Tries to change the thread wait flags with release semantics in case
2317 * of success.
2318 *
2319 * Must be called inside a critical section (interrupts disabled).
2320 *
2321 * In case the wait flags are equal to the expected wait flags, then the wait
2322 * flags are set to the desired wait flags.
2323 *
2324 * @param the_thread The thread.
2325 * @param expected_flags The expected wait flags.
2326 * @param desired_flags The desired wait flags.
2327 *
2328 * @retval true The wait flags were equal to the expected wait flags.
2329 * @retval false The wait flags were not equal to the expected wait flags.
2330 */
2331RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
2332  Thread_Control    *the_thread,
2333  Thread_Wait_flags  expected_flags,
2334  Thread_Wait_flags  desired_flags
2335)
2336{
2337  _Assert( _ISR_Get_level() != 0 );
2338
2339#if defined(RTEMS_SMP)
2340  return _Atomic_Compare_exchange_uint(
2341    &the_thread->Wait.flags,
2342    &expected_flags,
2343    desired_flags,
2344    ATOMIC_ORDER_RELEASE,
2345    ATOMIC_ORDER_RELAXED
2346  );
2347#else
2348  bool success = ( the_thread->Wait.flags == expected_flags );
2349
2350  if ( success ) {
2351    the_thread->Wait.flags = desired_flags;
2352  }
2353
2354  return success;
2355#endif
2356}
2357
2358/**
2359 * @brief Tries to change the thread wait flags with acquire semantics.
2360 *
2361 * In case the wait flags are equal to the expected wait flags, then the wait
2362 * flags are set to the desired wait flags.
2363 *
2364 * @param the_thread The thread.
2365 * @param expected_flags The expected wait flags.
2366 * @param desired_flags The desired wait flags.
2367 *
2368 * @retval true The wait flags were equal to the expected wait flags.
2369 * @retval false The wait flags were not equal to the expected wait flags.
2370 */
2371RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
2372  Thread_Control    *the_thread,
2373  Thread_Wait_flags  expected_flags,
2374  Thread_Wait_flags  desired_flags
2375)
2376{
2377#if defined(RTEMS_SMP)
2378  return _Atomic_Compare_exchange_uint(
2379    &the_thread->Wait.flags,
2380    &expected_flags,
2381    desired_flags,
2382    ATOMIC_ORDER_ACQUIRE,
2383    ATOMIC_ORDER_ACQUIRE
2384  );
2385#else
2386  bool      success;
2387  ISR_Level level;
2388
2389  _ISR_Local_disable( level );
2390
2391  success = _Thread_Wait_flags_try_change_release(
2392    the_thread,
2393    expected_flags,
2394    desired_flags
2395  );
2396
2397  _ISR_Local_enable( level );
2398  return success;
2399#endif
2400}
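
/*
 * Illustrative sketch (not part of the RTEMS API; the helper name and the use
 * of THREAD_WAIT_CLASS_OBJECT are hypothetical): the wait flags implement a
 * small state machine.  The blocking thread announces
 * THREAD_WAIT_STATE_INTEND_TO_BLOCK, carries out the blocking operation, and
 * then tries to move to THREAD_WAIT_STATE_BLOCKED.  The transition fails if a
 * timeout or a surrender already set THREAD_WAIT_STATE_READY_AGAIN.
 */
RTEMS_INLINE_ROUTINE bool _Example_Try_to_block( Thread_Control *executing )
{
  /* Executed with interrupts disabled by the blocking thread itself */
  _Thread_Wait_flags_set(
    executing,
    THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
  );

  /* ... install an optional watchdog and set the thread blocking state ... */

  /* The acquire semantics pair with the release done by the path which made
     the thread ready again */
  return _Thread_Wait_flags_try_change_acquire(
    executing,
    THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
    THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED
  );
}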
2401
2402/**
2403 * @brief Returns the object identifier of the object containing the current
2404 * thread wait queue.
2405 *
2406 * This function may be used for debug and system information purposes.  The
2407 * caller must be the owner of the thread lock.
2408 *
2409 * @param the_thread The thread.
2410 *
2411 * @retval 0 The thread waits on no thread queue currently, the thread wait
2412 *   queue is not contained in an object, or the current thread state provides
2413 *   insufficient information, e.g. the thread is in the middle of a blocking
2414 *   operation.
2415 * @retval other The object identifier of the object containing the thread wait
2416 *   queue.
2417 */
2418Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
2419
2420/**
2421 * @brief Gets the thread wait return code converted to a status.
2422 *
2423 * @param the_thread The thread to get the wait return code of.
2424 */
2425RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
2426  const Thread_Control *the_thread
2427)
2428{
2429  return (Status_Control) the_thread->Wait.return_code;
2430}
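
/*
 * Illustrative sketch (not part of the RTEMS API; the helper name is
 * hypothetical): once a thread returns from a wait, it typically tranquilizes
 * itself and then evaluates the wait return code set by the surrender,
 * cancel, or timeout path.
 */
RTEMS_INLINE_ROUTINE Status_Control _Example_Finish_wait(
  Thread_Control *executing
)
{
  /* Wait until all pending thread wait lock requests have completed (SMP) */
  _Thread_Wait_tranquilize( executing );

  /* Set by the path which ended the wait, e.g. STATUS_TIMEOUT */
  return _Thread_Wait_get_status( executing );
}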
2431
2432/**
2433 * @brief Cancels a blocking operation so that the thread can continue its
2434 * execution.
2435 *
2436 * In case this function actually cancelled the blocking operation, then the
2437 * thread wait return code is set to the specified status.
2438 *
2439 * A specialization of this function is _Thread_Timeout().
2440 *
2441 * @param[in, out] the_thread The thread.
2442 * @param status The thread wait status.
2443 */
2444void _Thread_Continue( Thread_Control *the_thread, Status_Control status );
2445
2446/**
2447 * @brief General purpose thread wait timeout.
2448 *
2449 * @param the_watchdog The thread timer watchdog.
2450 */
2451void _Thread_Timeout( Watchdog_Control *the_watchdog );
2452
2453/**
2454 * @brief Initializes the thread timer.
2455 *
2456 * @param[in, out] timer The timer to initialize.
2457 * @param cpu The cpu for the operation.
2458 */
2459RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
2460  Thread_Timer_information *timer,
2461  Per_CPU_Control          *cpu
2462)
2463{
2464  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
2465  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
2466  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
2467}
2468
2469/**
2470 * @brief Adds a timeout in ticks to the thread.
2471 *
2472 * @param[in, out] the_thread The thread to add the timeout to.
2473 * @param cpu The cpu for the operation.
2474 * @param ticks The timeout in clock ticks.
2475 */
2476RTEMS_INLINE_ROUTINE void _Thread_Add_timeout_ticks(
2477  Thread_Control    *the_thread,
2478  Per_CPU_Control   *cpu,
2479  Watchdog_Interval  ticks
2480)
2481{
2482  ISR_lock_Context lock_context;
2483
2484  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2485
2486  the_thread->Timer.header =
2487    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
2488  the_thread->Timer.Watchdog.routine = _Thread_Timeout;
2489  _Watchdog_Per_CPU_insert_ticks( &the_thread->Timer.Watchdog, cpu, ticks );
2490
2491  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2492}
2493
2494/**
2495 * @brief Inserts the thread timer into the realtime watchdog header of the cpu.
2496 *
2497 * @param[in, out] the_thread The thread for the operation.
2498 * @param cpu The cpu to get the watchdog header from.
2499 * @param routine The watchdog routine for the thread.
2500 * @param expire Expiration for the watchdog.
2501 */
2502RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_realtime(
2503  Thread_Control                 *the_thread,
2504  Per_CPU_Control                *cpu,
2505  Watchdog_Service_routine_entry  routine,
2506  uint64_t                        expire
2507)
2508{
2509  ISR_lock_Context  lock_context;
2510  Watchdog_Header  *header;
2511
2512  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2513
2514  header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
2515  the_thread->Timer.header = header;
2516  the_thread->Timer.Watchdog.routine = routine;
2517  _Watchdog_Per_CPU_insert( &the_thread->Timer.Watchdog, cpu, header, expire );
2518
2519  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2520}
2521
2522/**
2523 * @brief Removes the watchdog timer from the thread.
2524 *
2525 * @param[in, out] the_thread The thread to remove the watchdog from.
2526 */
2527RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
2528{
2529  ISR_lock_Context lock_context;
2530
2531  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2532
2533  _Watchdog_Per_CPU_remove(
2534    &the_thread->Timer.Watchdog,
2535#if defined(RTEMS_SMP)
2536    the_thread->Timer.Watchdog.cpu,
2537#else
2538    _Per_CPU_Get(),
2539#endif
2540    the_thread->Timer.header
2541  );
2542
2543  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2544}
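
/*
 * Illustrative sketch (not part of the RTEMS API; the helper name is
 * hypothetical): arm a timeout in ticks for a thread on the current processor
 * and cancel it again, for example because the wait ended early.
 */
RTEMS_INLINE_ROUTINE void _Example_Arm_and_cancel_timeout(
  Thread_Control    *the_thread,
  Watchdog_Interval  ticks
)
{
  /* Arms the _Thread_Timeout() watchdog in the ticks header of the current
     processor */
  _Thread_Add_timeout_ticks( the_thread, _Per_CPU_Get(), ticks );

  /* ... the wait ends before the timeout expires ... */

  /* Removes the watchdog again */
  _Thread_Timer_remove( the_thread );
}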
2545
2546/**
2547 * @brief Removes the watchdog timer from the thread and unblocks it if necessary.
2548 *
2549 * @param[in, out] the_thread The thread to remove the watchdog from and unblock
2550 *      if necessary.
2551 * @param queue The thread queue.
2552 */
2553RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
2554  Thread_Control     *the_thread,
2555  Thread_queue_Queue *queue
2556)
2557{
2558  _Thread_Wait_tranquilize( the_thread );
2559  _Thread_Timer_remove( the_thread );
2560
2561#if defined(RTEMS_MULTIPROCESSING)
2562  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
2563    _Thread_Unblock( the_thread );
2564  } else {
2565    _Thread_queue_Unblock_proxy( queue, the_thread );
2566  }
2567#else
2568  (void) queue;
2569  _Thread_Unblock( the_thread );
2570#endif
2571}
2572
2573/**
2574 * @brief Sets the name of the thread.
2575 *
2576 * @param[out] the_thread The thread to change the name of.
2577 * @param name The new name for the thread.
2578 *
2579 * @retval STATUS_SUCCESSFUL The operation succeeded.
2580 * @retval STATUS_RESULT_TOO_LARGE The name was too long.
2581 */
2582Status_Control _Thread_Set_name(
2583  Thread_Control *the_thread,
2584  const char     *name
2585);
2586
2587/**
2588 * @brief Gets the name of the thread.
2589 *
2590 * @param the_thread The thread to get the name of.
2591 * @param[out] buffer The buffer to return the thread's name in.
2592 * @param buffer_size The size of @a buffer.
2593 *
2594 * @return The number of bytes copied to @a buffer.
2595 */
2596size_t _Thread_Get_name(
2597  const Thread_Control *the_thread,
2598  char                 *buffer,
2599  size_t                buffer_size
2600);
2601
2602#if defined(RTEMS_SMP)
2603#define THREAD_PIN_STEP 2
2604
2605#define THREAD_PIN_PREEMPTION 1
2606
2607/**
2608 * @brief Unpins the thread.
2609 *
2610 * @param executing The currently executing thread.
2611 * @param cpu_self The cpu for the operation.
2612 */
2613void _Thread_Do_unpin(
2614  Thread_Control  *executing,
2615  Per_CPU_Control *cpu_self
2616);
2617#endif
2618
2619/**
2620 * @brief Pins the executing thread.
2621 *
2622 * @param executing The currently executing thread.
2623 */
2624RTEMS_INLINE_ROUTINE void _Thread_Pin( Thread_Control *executing )
2625{
2626#if defined(RTEMS_SMP)
2627  _Assert( executing == _Thread_Executing );
2628
2629  executing->Scheduler.pin_level += THREAD_PIN_STEP;
2630#else
2631  (void) executing;
2632#endif
2633}
2634
2635/**
2636 * @brief Unpins the thread.
2637 *
2638 * @param executing The currently executing thread.
2639 * @param cpu_self The cpu for the operation.
2640 */
2641RTEMS_INLINE_ROUTINE void _Thread_Unpin(
2642  Thread_Control  *executing,
2643  Per_CPU_Control *cpu_self
2644)
2645{
2646#if defined(RTEMS_SMP)
2647  unsigned int pin_level;
2648
2649  _Assert( executing == _Thread_Executing );
2650
2651  pin_level = executing->Scheduler.pin_level;
2652  _Assert( pin_level > 0 );
2653
2654  if (
2655    RTEMS_PREDICT_TRUE(
2656      pin_level != ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION )
2657    )
2658  ) {
2659    executing->Scheduler.pin_level = pin_level - THREAD_PIN_STEP;
2660  } else {
2661    _Thread_Do_unpin( executing, cpu_self );
2662  }
2663#else
2664  (void) executing;
2665  (void) cpu_self;
2666#endif
2667}
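
/*
 * Illustrative sketch (not part of the RTEMS API; the helper name is
 * hypothetical): pin/unpin pairs nest, since each pin adds THREAD_PIN_STEP to
 * the pin level and nested unpins simply subtract it again.  The sketch
 * assumes the caller disabled thread dispatching and passes the current
 * processor in cpu_self.
 */
RTEMS_INLINE_ROUTINE void _Example_Run_pinned(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self,
  void            ( *work )( void *arg ),
  void            *arg
)
{
  /* Keep the executing thread on its current processor */
  _Thread_Pin( executing );

  ( *work )( arg );

  /* Undo the pin; only the outermost unpin may end up in _Thread_Do_unpin() */
  _Thread_Unpin( executing, cpu_self );
}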
2668
2669/** @}*/
2670
2671#ifdef __cplusplus
2672}
2673#endif
2674
2675#if defined(RTEMS_MULTIPROCESSING)
2676#include <rtems/score/threadmp.h>
2677#endif
2678
2679#endif
2680/* end of include file */