source: rtems/cpukit/include/rtems/score/threadimpl.h @ 6822af7

Last change on this file since 6822af7 was 8b15a324, checked in by Sebastian Huber <sebastian.huber@…>, on 04/26/21 at 08:15:05

Use alias for rtems_task_self() and pthread_self()

This may reduce the code size a bit.

1/**
2 * @file
3 *
4 * @ingroup RTEMSScoreThread
5 *
6 * @brief This header file provides interfaces of the
7 *   @ref RTEMSScoreThread which are only used by the implementation.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2017 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/timestampimpl.h>
35#include <rtems/score/threadqimpl.h>
36#include <rtems/score/todimpl.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44/**
45 * @addtogroup RTEMSScoreThread
46 *
47 * @{
48 */
49
50/**
51 *  Self for the GNU Ada Run-Time
52 */
53extern void *rtems_ada_self;
54
55/**
56 * @brief Object identifier of the global constructor thread.
57 *
58 * This variable is set by _RTEMS_tasks_Initialize_user_tasks_body() or
59 * _POSIX_Threads_Initialize_user_threads_body().
60 *
61 * It is consumed by _Thread_Handler().
62 */
63extern Objects_Id _Thread_Global_constructor;
64
65/**
66 *  The following points to the thread whose floating point
67 *  context is currently loaded.
68 */
69#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
70extern Thread_Control *_Thread_Allocated_fp;
71#endif
72
73#if defined(RTEMS_SMP)
74#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
75  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
76#endif
77
78typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
79
80/**
81 * @brief Calls the visitor with each thread and the given argument until
82 *      the visitor indicates that it is done.
83 *
84 * @param visitor Function that gets a thread and @a arg as parameters and
85 *      returns true if the iteration shall stop.
86 * @param arg Parameter for @a visitor
87 */
88void _Thread_Iterate(
89  Thread_Visitor  visitor,
90  void           *arg
91);
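
/*
 * Illustrative sketch (not part of the original header): a visitor which
 * counts all threads in the system.  The example names are arbitrary.  The
 * visitor returns false to continue the iteration and true to stop it.
 */
static bool _Example_Count_visitor( Thread_Control *the_thread, void *arg )
{
  uint32_t *count;

  (void) the_thread;
  count = arg;
  ++( *count );

  return false;
}

static uint32_t _Example_Count_threads( void )
{
  uint32_t count;

  count = 0;
  _Thread_Iterate( _Example_Count_visitor, &count );

  return count;
}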
92
93/**
94 * @brief Initializes the thread information
95 *
96 * @param[out] information Information to initialize.
97 */
98void _Thread_Initialize_information( Thread_Information *information );
99
100/**
101 * @brief Initializes thread handler.
102 *
103 * This routine performs the initialization necessary for this handler.
104 */
105void _Thread_Handler_initialization(void);
106
107/**
108 * @brief Creates idle thread.
109 *
110 * This routine creates the idle thread.
111 *
112 * @warning No thread should be created before this one.
113 */
114void _Thread_Create_idle(void);
115
116/**
117 * @brief Starts thread multitasking.
118 *
119 * This routine initiates multitasking.  It is invoked only as
120 * part of initialization and its invocation is the last act of
121 * the non-multitasking part of the system initialization.
122 */
123RTEMS_NO_RETURN void _Thread_Start_multitasking( void );
124
125/**
126 * @brief The configuration of a new thread to initialize.
127 */
128typedef struct {
129  /**
130   * @brief The scheduler control instance for the thread.
131   */
132  const struct _Scheduler_Control *scheduler;
133
134  /**
135   * @brief The starting address of the stack area.
136   */
137  void *stack_area;
138
139  /**
140   * @brief The size of the stack area in bytes.
141   */
142  size_t stack_size;
143
144  /**
145   * @brief This member contains the handler to free the stack.
146   *
147   * It shall not be NULL.  Use _Stack_Free_nothing() if nothing is to free.
148   */
149  void ( *stack_free )( void * );
150
151  /**
152   * @brief The new thread's priority.
153   */
154  Priority_Control priority;
155
156  /**
157   * @brief The thread's budget algorithm.
158   */
159  Thread_CPU_budget_algorithms budget_algorithm;
160
161  /**
162   * @brief The thread's initial budget callout.
163   */
164  Thread_CPU_budget_algorithm_callout budget_callout;
165
166  /**
167   * @brief The thread's initial CPU time budget.
168   */
169  uint32_t cpu_time_budget;
170
171  /**
172   * @brief 32-bit unsigned integer name of the object for the thread.
173   */
174  uint32_t name;
175
176  /**
177   * @brief The thread's initial ISR level.
178   */
179  uint32_t isr_level;
180
181  /**
182   * @brief Indicates whether the thread needs a floating-point area.
183   */
184  bool is_fp;
185
186  /**
187   * @brief Indicates whether the new thread is preemptible.
188   */
189  bool is_preemptible;
190} Thread_Configuration;
191
192/**
193 * @brief Initializes thread.
194 *
195 * This routine initializes the specified thread.  It allocates
196 * all memory associated with this thread.  It completes by adding
197 * the thread to the local object table so operations on this
198 * thread id are allowed.
199 *
200 * @note If stack_area is NULL, it is allocated from the workspace.
201 *
202 * @note If the stack is allocated from the workspace, then it is
203 *       guaranteed to be of at least minimum size.
204 *
205 * @param information The thread information.
206 * @param the_thread The thread to initialize.
207 * @param config The configuration of the thread to initialize.
208 *
209 * @retval STATUS_SUCCESSFUL The thread initialization was successful.
210 *
211 * @retval STATUS_UNSATISFIED The thread initialization failed.
212 */
213Status_Control _Thread_Initialize(
214  Thread_Information         *information,
215  Thread_Control             *the_thread,
216  const Thread_Configuration *config
217);
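
/*
 * Illustrative sketch (not part of the original header): filling in a
 * Thread_Configuration and initializing a thread with it.  The priority,
 * name, and the use of the default scheduler instance are arbitrary
 * placeholders; memset() assumes that <string.h> is available, and
 * _Stack_Free_nothing() is the handler mentioned above for stacks which
 * need no explicit free operation.
 */
static Status_Control _Example_Initialize_thread(
  Thread_Information *information,
  Thread_Control     *the_thread,
  void               *stack_area,
  size_t              stack_size
)
{
  Thread_Configuration config;

  memset( &config, 0, sizeof( config ) );
  config.scheduler = &_Scheduler_Table[ 0 ];
  config.stack_area = stack_area;
  config.stack_size = stack_size;
  config.stack_free = _Stack_Free_nothing;
  config.priority = 1;
  config.name = 0x45584d50;   /* arbitrary 32-bit unsigned integer name */
  config.is_preemptible = true;

  return _Thread_Initialize( information, the_thread, &config );
}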
218
219/**
220 * @brief Frees the thread.
221 *
222 * This routine invokes the thread delete extensions and frees all resources
223 * associated with the thread.  Afterwards the thread object is closed.
224 *
225 * @param[in, out] information is the thread information.
226 *
227 * @param[in, out] the_thread is the thread to free.
228 */
229void _Thread_Free(
230  Thread_Information *information,
231  Thread_Control     *the_thread
232);
233
234/**
235 * @brief Starts the specified thread.
236 *
237 * If the thread is not in the dormant state, the routine returns
238 * STATUS_INCORRECT_STATE and performs no actions except enabling interrupts
239 * as indicated by the ISR lock context.
240 *
241 * Otherwise, this routine initializes the executable information for the
242 * thread and makes it ready to execute.  After the call of this routine, the
243 * thread competes with all other ready threads for CPU time.
244 *
245 * Then the routine enables the local interrupts as indicated by the ISR lock
246 * context.
247 *
248 * Then the thread start user extensions are called with thread dispatching
249 * disabled and interrupts enabled after making the thread ready.  Please note
250 * that in SMP configurations, the thread switch and begin user extensions may
251 * be called in parallel on another processor.
252 *
253 * Then thread dispatching is enabled and other threads may execute before the
254 * routine returns.
255 *
256 * @param[in, out] the_thread is the thread to start.
257 *
258 * @param entry is the thread entry information.
259 *
260 * @param[in, out] lock_context is the ISR lock context which shall be used
261 *   to disable the local interrupts before the call of this routine.
262 *
263 * @retval STATUS_SUCCESSFUL The thread start was successful.
264 *
265 * @retval STATUS_INCORRECT_STATE The thread was already started.
266 */
267Status_Control _Thread_Start(
268  Thread_Control                 *the_thread,
269  const Thread_Entry_information *entry,
270  ISR_lock_Context               *lock_context
271);
272
273/**
274 * @brief Restarts the currently executing thread.
275 *
276 * @param[in, out] executing The currently executing thread.
277 * @param entry The start entry information for @a executing.
278 * @param lock_context The lock context.
279 */
280RTEMS_NO_RETURN void _Thread_Restart_self(
281  Thread_Control                 *executing,
282  const Thread_Entry_information *entry,
283  ISR_lock_Context               *lock_context
284);
285
286/**
287 * @brief Restarts the thread.
288 *
289 * @param[in, out] the_thread The thread to restart.
290 * @param entry The start entry information for @a the_thread.
291 * @param lock_context The lock context.
292 *
293 * @retval true The operation was successful.
294 * @retval false The operation failed.
295 */
296bool _Thread_Restart_other(
297  Thread_Control                 *the_thread,
298  const Thread_Entry_information *entry,
299  ISR_lock_Context               *lock_context
300);
301
302/**
303 * @brief Yields the currently executing thread.
304 *
305 * @param[in, out] executing The thread that performs a yield.
306 */
307void _Thread_Yield( Thread_Control *executing );
308
309/**
310 * @brief Changes the life state of the currently executing thread with the given sets.
311 *
312 * @param clear States to clear.
313 * @param set States to set.
314 * @param ignore States to ignore.
315 *
316 * @return The previous state the thread was in.
317 */
318Thread_Life_state _Thread_Change_life(
319  Thread_Life_state clear,
320  Thread_Life_state set,
321  Thread_Life_state ignore
322);
323
324/**
325 * @brief Sets the thread life protection.
326 *
327 * Calls _Thread_Change_life() with the given state ANDed with
328 * THREAD_LIFE_PROTECTED as the states to set and THREAD_LIFE_PROTECTED to clear.
329 *
330 * @param state The states to set.
331 *
332 * @return The previous state the thread was in.
333 */
334Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
335
336/**
337 * @brief Kills all zombie threads in the system.
338 *
339 * Threads change into the zombie state as the last step in the thread
340 * termination sequence right before a context switch to the heir thread is
341 * initiated.  Since the thread stack is still in use during this phase we have
342 * to postpone the thread stack reclamation until this point.  On SMP
343 * configurations we may have to busy wait for context switch completion here.
344 */
345void _Thread_Kill_zombies( void );
346
347/**
348 * @brief Exits the currently executing thread.
349 *
350 * @param[in, out] executing The currently executing thread.
351 * @param set The states to set.
352 * @param[out] exit_value Contains the exit value of the thread.
353 */
354void _Thread_Exit(
355  Thread_Control    *executing,
356  Thread_Life_state  set,
357  void              *exit_value
358);
359
360/**
361 * @brief Joins the currently executing thread with the given thread and
362 *      waits until that thread terminates.
363 *
364 * @param[in, out] the_thread The thread to wait for.
365 * @param waiting_for_join The states control for the join.
366 * @param[in, out] executing The currently executing thread.
367 * @param queue_context The thread queue context.
368 */
369void _Thread_Join(
370  Thread_Control       *the_thread,
371  States_Control        waiting_for_join,
372  Thread_Control       *executing,
373  Thread_queue_Context *queue_context
374);
375
376/**
377 * @brief Cancels the thread.
378 *
379 * @param[in, out] the_thread The thread to cancel.
380 * @param executing The currently executing thread.
381 * @param exit_value The exit value for the thread.
382 */
383void _Thread_Cancel(
384  Thread_Control *the_thread,
385  Thread_Control *executing,
386  void           *exit_value
387);
388
389typedef struct {
390  Thread_queue_Context  Base;
391  Thread_Control       *cancel;
392} Thread_Close_context;
393
394/**
395 * @brief Closes the thread.
396 *
397 * Closes the thread object and starts the thread termination sequence.  If
398 * the executing thread is not the terminated thread, then this function waits
399 * until the terminating thread has reached the zombie state.
400 *
401 * @param the_thread The thread to close.
402 * @param executing The currently executing thread.
403 * @param[in, out] context The thread close context.
404 */
405void _Thread_Close(
406  Thread_Control       *the_thread,
407  Thread_Control       *executing,
408  Thread_Close_context *context
409);
410
411/**
412 * @brief Checks if the thread is ready.
413 *
414 * @param the_thread The thread to check if it is ready.
415 *
416 * @retval true The thread is currently in the ready state.
417 * @retval false The thread is currently not ready.
418 */
419RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
420{
421  return _States_Is_ready( the_thread->current_state );
422}
423
424/**
425 * @brief Clears the specified thread state without locking the lock context.
426 *
427 * In the case the previous state is a non-ready state and the next state is
428 * the ready state, then the thread is unblocked by the scheduler.
429 *
430 * @param[in, out] the_thread The thread.
431 * @param state The state to clear.  It must not be zero.
432 *
433 * @return The thread's previous state.
434 */
435States_Control _Thread_Clear_state_locked(
436  Thread_Control *the_thread,
437  States_Control  state
438);
439
440/**
441 * @brief Clears the specified thread state.
442 *
443 * In the case the previous state is a non-ready state and the next state is
444 * the ready state, then the thread is unblocked by the scheduler.
445 *
446 * @param[in, out] the_thread The thread.
447 * @param state The state to clear.  It must not be zero.
448 *
449 * @return The previous state.
450 */
451States_Control _Thread_Clear_state(
452  Thread_Control *the_thread,
453  States_Control  state
454);
455
456/**
457 * @brief Sets the specified thread state without locking the lock context.
458 *
459 * In the case the previous state is the ready state, then the thread is blocked
460 * by the scheduler.
461 *
462 * @param[in, out] the_thread The thread.
463 * @param state The state to set.  It must not be zero.
464 *
465 * @return The previous state.
466 */
467States_Control _Thread_Set_state_locked(
468  Thread_Control *the_thread,
469  States_Control  state
470);
471
472/**
473 * @brief Sets the specified thread state.
474 *
475 * In the case the previous state is the ready state, then the thread is blocked
476 * by the scheduler.
477 *
478 * @param[in, out] the_thread The thread.
479 * @param state The state to set.  It must not be zero.
480 *
481 * @return The previous state.
482 */
483States_Control _Thread_Set_state(
484  Thread_Control *the_thread,
485  States_Control  state
486);
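
/*
 * Illustrative sketch (not part of the original header): suspending and
 * later resuming a thread with the state set and clear primitives.  The
 * STATES_SUSPENDED constant is provided by <rtems/score/statesimpl.h>.
 */
static void _Example_Suspend_and_resume( Thread_Control *the_thread )
{
  States_Control previous_state;

  previous_state = _Thread_Set_state( the_thread, STATES_SUSPENDED );
  (void) previous_state;

  /* ... the thread stays blocked until the state is cleared ... */

  _Thread_Clear_state( the_thread, STATES_SUSPENDED );
}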
487
488/**
489 * @brief Initializes the environment for a thread.
490 *
491 * This routine initializes the context of @a the_thread to its
492 * appropriate starting state.
493 *
494 * @param[in, out] the_thread The pointer to the thread control block.
495 */
496void _Thread_Load_environment(
497  Thread_Control *the_thread
498);
499
500/**
501 * @brief Calls the thread's start entry of the idle kind.
502 *
503 * @param executing The currently executing thread.
504 */
505void _Thread_Entry_adaptor_idle( Thread_Control *executing );
506
507/**
508 * @brief Calls the thread's start entry of the numeric kind.
509 *
510 * @param executing The currently executing thread.
511 */
512void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
513
514/**
515 * @brief Calls the thread's start entry of the pointer kind.
516 *
517 * Stores the return value in the Wait.return_argument of the thread.
518 *
519 * @param executing The currently executing thread.
520 */
521void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
522
523/**
524 * @brief Wrapper function for all threads.
525 *
526 * This routine is the wrapper function for all threads.  It is
527 * the starting point for all threads.  The user provided thread
528 * entry point is invoked by this routine.  Operations
529 * which must be performed immediately before and after the user's
530 * thread executes are found here.
531 *
532 * @note On entry, it is assumed all interrupts are blocked and that this
533 * routine needs to set the initial isr level.  This may or may not
534 * actually be needed by the context switch routine and as a result
535 * interrupts may already be at their proper level.  Either way,
536 * setting the initial isr level properly here is safe.
537 */
538void _Thread_Handler( void );
539
540/**
541 * @brief Acquires the lock context in a critical section.
542 *
543 * @param the_thread The thread to acquire the lock context.
544 * @param lock_context The lock context.
545 */
546RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
547  Thread_Control   *the_thread,
548  ISR_lock_Context *lock_context
549)
550{
551  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
552}
553
554/**
555 * @brief Disables interrupts and acquires the lock_context.
556 *
557 * @param the_thread The thread to acquire the lock context.
558 * @param lock_context The lock context.
559 */
560RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
561  Thread_Control   *the_thread,
562  ISR_lock_Context *lock_context
563)
564{
565  _ISR_lock_ISR_disable( lock_context );
566  _Thread_State_acquire_critical( the_thread, lock_context );
567}
568
569/**
570 * @brief Disables interrupts and acquires the lock context for the currently
571 *      executing thread.
572 *
573 * @param lock_context The lock context.
574 *
575 * @return The currently executing thread.
576 */
577RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
578  ISR_lock_Context *lock_context
579)
580{
581  Thread_Control *executing;
582
583  _ISR_lock_ISR_disable( lock_context );
584  executing = _Thread_Executing;
585  _Thread_State_acquire_critical( executing, lock_context );
586
587  return executing;
588}
589
590/**
591 * @brief Releases the lock context in a critical section.
592 *
593 * @param the_thread The thread to release the lock context.
594 * @param lock_context The lock context.
595 */
596RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
597  Thread_Control   *the_thread,
598  ISR_lock_Context *lock_context
599)
600{
601  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
602}
603
604/**
605 * @brief Releases the lock context and enables interrupts.
606 *
607 * @param[in, out] the_thread The thread to release the lock context.
608 * @param[out] lock_context The lock context.
609 */
610RTEMS_INLINE_ROUTINE void _Thread_State_release(
611  Thread_Control   *the_thread,
612  ISR_lock_Context *lock_context
613)
614{
615  _Thread_State_release_critical( the_thread, lock_context );
616  _ISR_lock_ISR_enable( lock_context );
617}
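
/*
 * Illustrative sketch (not part of the original header): reading the state
 * of the executing thread under the thread state lock.
 */
static States_Control _Example_Get_executing_state( void )
{
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  States_Control    state;

  executing = _Thread_State_acquire_for_executing( &lock_context );
  state = executing->current_state;
  _Thread_State_release( executing, &lock_context );

  return state;
}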
618
619/**
620 * @brief Checks if the thread is the owner of the lock of the join queue.
621 *
622 * @param the_thread The thread for the verification.
623 *
624 * @retval true The thread is owner of the lock of the join queue.
625 * @retval false The thread is not owner of the lock of the join queue.
626 */
627#if defined(RTEMS_DEBUG)
628RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
629  const Thread_Control *the_thread
630)
631{
632  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
633}
634#endif
635
636/**
637 * @brief Performs the priority actions specified by the thread queue context
638 * along the thread queue path.
639 *
640 * The caller must be the owner of the thread wait lock.
641 *
642 * @param start_of_path The start thread of the thread queue path.
643 * @param queue_context The thread queue context specifying the thread queue
644 *   path and initial thread priority actions.
645 *
646 * @see _Thread_queue_Path_acquire_critical().
647 */
648void _Thread_Priority_perform_actions(
649  Thread_Control       *start_of_path,
650  Thread_queue_Context *queue_context
651);
652
653/**
654 * @brief Adds the specified thread priority node to the corresponding thread
655 * priority aggregation.
656 *
657 * The caller must be the owner of the thread wait lock.
658 *
659 * @param the_thread The thread.
660 * @param priority_node The thread priority node to add.
661 * @param queue_context The thread queue context to return an updated set of
662 *   threads for _Thread_Priority_update().  The thread queue context must be
663 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
664 *   call of this function.
665 *
666 * @see _Thread_Wait_acquire().
667 */
668void _Thread_Priority_add(
669  Thread_Control       *the_thread,
670  Priority_Node        *priority_node,
671  Thread_queue_Context *queue_context
672);
673
674/**
675 * @brief Removes the specified thread priority node from the corresponding
676 * thread priority aggregation.
677 *
678 * The caller must be the owner of the thread wait lock.
679 *
680 * @param the_thread The thread.
681 * @param priority_node The thread priority node to remove.
682 * @param queue_context The thread queue context to return an updated set of
683 *   threads for _Thread_Priority_update().  The thread queue context must be
684 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
685 *   call of this function.
686 *
687 * @see _Thread_Wait_acquire().
688 */
689void _Thread_Priority_remove(
690  Thread_Control       *the_thread,
691  Priority_Node        *priority_node,
692  Thread_queue_Context *queue_context
693);
694
695/**
696 * @brief Propagates a thread priority value change in the specified thread
697 * priority node to the corresponding thread priority aggregation.
698 *
699 * The caller must be the owner of the thread wait lock.
700 *
701 * @param the_thread The thread.
702 * @param[out] priority_node The thread priority node to change.
703 * @param prepend_it In case this is true, then the thread is prepended to
704 *   its priority group in its home scheduler instance, otherwise it is
705 *   appended.
706 * @param queue_context The thread queue context to return an updated set of
707 *   threads for _Thread_Priority_update().  The thread queue context must be
708 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
709 *   call of this function.
710 *
711 * @see _Thread_Wait_acquire().
712 */
713void _Thread_Priority_changed(
714  Thread_Control       *the_thread,
715  Priority_Node        *priority_node,
716  bool                  prepend_it,
717  Thread_queue_Context *queue_context
718);
719
720/**
721 * @brief Changes the thread priority value of the specified thread priority
722 * node in the corresponding thread priority aggregation.
723 *
724 * The caller must be the owner of the thread wait lock.
725 *
726 * @param the_thread The thread.
727 * @param[out] priority_node The thread priority node to change.
728 * @param new_priority The new thread priority value of the thread priority
729 *   node to change.
730 * @param prepend_it In case this is true, then the thread is prepended to
731 *   its priority group in its home scheduler instance, otherwise it is
732 *   appended.
733 * @param queue_context The thread queue context to return an updated set of
734 *   threads for _Thread_Priority_update().  The thread queue context must be
735 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
736 *   call of this function.
737 *
738 * @see _Thread_Wait_acquire().
739 */
740RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
741  Thread_Control       *the_thread,
742  Priority_Node        *priority_node,
743  Priority_Control      new_priority,
744  bool                  prepend_it,
745  Thread_queue_Context *queue_context
746)
747{
748  _Priority_Node_set_priority( priority_node, new_priority );
749  _Thread_Priority_changed(
750    the_thread,
751    priority_node,
752    prepend_it,
753    queue_context
754  );
755}
756
757/**
758 * @brief Replaces the victim priority node with the replacement priority node
759 * in the corresponding thread priority aggregation.
760 *
761 * The caller must be the owner of the thread wait lock.
762 *
763 * @param the_thread The thread.
764 * @param victim_node The victim thread priority node.
765 * @param replacement_node The replacement thread priority node.
766 *
767 * @see _Thread_Wait_acquire().
768 */
769void _Thread_Priority_replace(
770  Thread_Control *the_thread,
771  Priority_Node  *victim_node,
772  Priority_Node  *replacement_node
773);
774
775/**
776 * @brief Updates the priority of all threads in the set
777 *
778 * @param queue_context The thread queue context to return an updated set of
779 *   threads for _Thread_Priority_update().  The thread queue context must be
780 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
781 *   call of this function.
782 *
783 * @see _Thread_Priority_add(), _Thread_Priority_change(),
784 *   _Thread_Priority_changed() and _Thread_Priority_remove().
785 */
786void _Thread_Priority_update( Thread_queue_Context *queue_context );
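
/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * to change the real priority of a thread and carry out the resulting
 * scheduler updates.  _Thread_queue_Context_initialize(),
 * _Thread_Wait_acquire(), and _Thread_Wait_release() are assumed to be
 * available from the thread queue and thread wait implementation.
 */
static void _Example_Change_real_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  Thread_queue_Context queue_context;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );
  _Thread_Priority_change(
    the_thread,
    &the_thread->Real_priority,
    new_priority,
    false,
    &queue_context
  );
  _Thread_Wait_release( the_thread, &queue_context );
  _Thread_Priority_update( &queue_context );
}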
787
788/**
789 * @brief Updates the priority of the thread and changes its sticky level.
790 *
791 * @param the_thread The thread.
792 * @param sticky_level_change The change of the sticky level.
793 */
794#if defined(RTEMS_SMP)
795void _Thread_Priority_and_sticky_update(
796  Thread_Control *the_thread,
797  int             sticky_level_change
798);
799#endif
800
801/**
802 * @brief Checks if the left thread priority is less than the right thread
803 *      priority in the intuitive sense of priority.
804 *
805 * @param left The left thread priority.
806 * @param right The right thread priority.
807 *
808 * @retval true The left priority is less in the intuitive sense.
809 * @retval false The left priority is greater or equal in the intuitive sense.
810 */
811RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
812  Priority_Control left,
813  Priority_Control right
814)
815{
816  return left > right;
817}
818
819/**
820 * @brief Returns the highest priority of the left and right thread priorities
821 * in the intuitive sense of priority.
822 *
823 * @param left The left thread priority.
824 * @param right The right thread priority.
825 *
826 * @return The highest priority in the intuitive sense of priority.
827 */
828RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
829  Priority_Control left,
830  Priority_Control right
831)
832{
833  return _Thread_Priority_less_than( left, right ) ? right : left;
834}
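
/*
 * Illustrative sketch (not part of the original header): a lower priority
 * number means a higher importance, so priority value 1 is the highest of
 * { 1, 2 } in the intuitive sense.
 */
static void _Example_Priority_ordering( void )
{
  _Assert( _Thread_Priority_less_than( 2, 1 ) );
  _Assert( _Thread_Priority_highest( 1, 2 ) == 1 );
}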
835
836/**
837 * @brief Gets the thread object information for the API of the object
838 *   identifier.
839 *
840 * @param id is an object identifier which defines the API to get the
841 *   associated thread objects information.
842 *
843 * @retval NULL The object identifier had an invalid API.
844 *
845 * @return Returns the thread object information associated with the API of the
846 *   object identifier.
847 */
848RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information_by_id(
849  Objects_Id id
850)
851{
852  uint32_t the_api;
853
854  the_api = _Objects_Get_API( id );
855
856  if ( !_Objects_Is_api_valid( the_api ) ) {
857    return NULL;
858  }
859
860  /*
861   * Threads are always first class :)
862   *
863   * There is no need to validate the object class of the object identifier,
864   * since this will be done by the object get methods.
865   */
866  return _Objects_Information_table[ the_api ][ 1 ];
867}
868
869/**
870 * @brief Gets the thread object information of the thread.
871 *
872 * @param the_thread is the thread to get the thread object information.
873 *
874 * @return Returns the thread object information of the thread.
875 */
876RTEMS_INLINE_ROUTINE Thread_Information *_Thread_Get_objects_information(
877  Thread_Control *the_thread
878)
879{
880  size_t              the_api;
881  Thread_Information *information;
882
883  the_api = (size_t) _Objects_Get_API( the_thread->Object.id );
884  _Assert( _Objects_Is_api_valid( the_api ) );
885
886  information = (Thread_Information *)
887    _Objects_Information_table[ the_api ][ 1 ];
888  _Assert( information != NULL );
889
890  return information;
891}
892
893/**
894 * @brief Gets a thread by its identifier.
895 *
896 * @see _Objects_Get().
897 *
898 * @param id The id of the thread.
899 * @param lock_context The lock context.
900 */
901Thread_Control *_Thread_Get(
902  Objects_Id         id,
903  ISR_lock_Context  *lock_context
904);
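
/*
 * Illustrative sketch (not part of the original header): looking up a
 * thread by identifier and keeping it valid while operating on it.  The
 * pattern with _Thread_Dispatch_disable_critical() and
 * _Thread_Dispatch_enable() from <rtems/score/threaddispatch.h> is an
 * assumption based on common usage; the protection actually required
 * depends on the operation performed.
 */
static bool _Example_Operate_on_thread( Objects_Id id )
{
  Thread_Control   *the_thread;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *cpu_self;

  the_thread = _Thread_Get( id, &lock_context );

  if ( the_thread == NULL ) {
    return false;
  }

  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  _ISR_lock_ISR_enable( &lock_context );

  /* ... operate on the_thread while thread dispatching is disabled ... */

  _Thread_Dispatch_enable( cpu_self );

  return true;
}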
905
906/**
907 * @brief Gets the identifier of the calling thread.
908 *
909 * @return Returns the identifier of the calling thread.
910 */
911Objects_Id _Thread_Self_id( void );
912
913/**
914 * @brief Gets the cpu of the thread's scheduler.
915 *
916 * @param thread The thread.
917 *
918 * @return The cpu of the thread's scheduler.
919 */
920RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
921  const Thread_Control *thread
922)
923{
924#if defined(RTEMS_SMP)
925  return thread->Scheduler.cpu;
926#else
927  (void) thread;
928
929  return _Per_CPU_Get();
930#endif
931}
932
933/**
934 * @brief Sets the cpu of the thread's scheduler.
935 *
936 * @param[out] thread The thread.
937 * @param cpu The cpu to set.
938 */
939RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
940  Thread_Control *thread,
941  Per_CPU_Control *cpu
942)
943{
944#if defined(RTEMS_SMP)
945  thread->Scheduler.cpu = cpu;
946#else
947  (void) thread;
948  (void) cpu;
949#endif
950}
951
952/**
953 * @brief Checks if the thread is the currently executing thread.
954 *
955 * This function returns true if the_thread is the currently executing
956 * thread, and false otherwise.
957 *
958 * @param the_thread The thread to verify if it is the currently executing thread.
959 *
960 * @retval true @a the_thread is the currently executing one.
961 * @retval false @a the_thread is not the currently executing one.
962 */
963RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
964  const Thread_Control *the_thread
965)
966{
967  return ( the_thread == _Thread_Executing );
968}
969
970#if defined(RTEMS_SMP)
971/**
972 * @brief Checks if the thread executes currently on some processor in the
973 * system.
974 *
975 * Do not confuse this with _Thread_Is_executing() which checks only the
976 * current processor.
977 *
978 * @param the_thread The thread for the verification.
979 *
980 * @retval true @a the_thread is the currently executing one.
981 * @retval false @a the_thread is not the currently executing one.
982 */
983RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
984  const Thread_Control *the_thread
985)
986{
987  return _CPU_Context_Get_is_executing( &the_thread->Registers );
988}
989#endif
990
991/**
992 * @brief Checks if the thread is the heir.
993 *
994 * This function returns true if the_thread is the heir
995 * thread, and false otherwise.
996 *
997 * @param the_thread The thread for the verification.
998 *
999 * @retval true @a the_thread is the heir.
1000 * @retval false @a the_thread is not the heir.
1001 */
1002RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
1003  const Thread_Control *the_thread
1004)
1005{
1006  return ( the_thread == _Thread_Heir );
1007}
1008
1009/**
1010 * @brief Unblocks the thread.
1011 *
1012 * This routine clears any blocking state for the_thread.  It performs
1013 * any necessary scheduling operations including the selection of
1014 * a new heir thread.
1015 *
1016 * @param[in, out] the_thread The thread to unblock.
1017 */
1018RTEMS_INLINE_ROUTINE void _Thread_Unblock (
1019  Thread_Control *the_thread
1020)
1021{
1022  _Thread_Clear_state( the_thread, STATES_BLOCKED );
1023}
1024
1025/**
1026 * @brief Checks if the floating point context of the thread is currently
1027 *      loaded in the floating point unit.
1028 *
1029 * This function returns true if the floating point context of
1030 * the_thread is currently loaded in the floating point unit, and
1031 * false otherwise.
1032 *
1033 * @param the_thread The thread for the verification.
1034 *
1035 * @retval true The floating point context of @a the_thread is currently
1036 *      loaded in the floating point unit.
1037 * @retval false The floating point context of @a the_thread is currently not
1038 *      loaded in the floating point unit.
1039 */
1040#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1041RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
1042  const Thread_Control *the_thread
1043)
1044{
1045  return ( the_thread == _Thread_Allocated_fp );
1046}
1047#endif
1048
1049/*
1050 * If the CPU has hardware floating point, then we must address saving
1051 * and restoring it as part of the context switch.
1052 *
1053 * The second conditional compilation section selects the algorithm used
1054 * to context switch between floating point tasks.  The deferred algorithm
1055 * can be significantly better in a system with few floating point tasks
1056 * because it reduces the total number of save and restore FP context
1057 * operations.  However, this algorithm can not be used on all CPUs due
1058 * to unpredictable use of FP registers by some compilers for integer
1059 * operations.
1060 */
1061
1062/**
1063 * @brief Saves the executing thread's floating point area.
1064 *
1065 * @param executing The currently executing thread.
1066 */
1067RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
1068{
1069#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1070#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
1071  if ( executing->fp_context != NULL )
1072    _Context_Save_fp( &executing->fp_context );
1073#endif
1074#endif
1075}
1076
1077/**
1078 * @brief Restores the executing thread's floating point area.
1079 *
1080 * @param executing The currently executing thread.
1081 */
1082RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
1083{
1084#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1085#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
1086  if ( (executing->fp_context != NULL) &&
1087       !_Thread_Is_allocated_fp( executing ) ) {
1088    if ( _Thread_Allocated_fp != NULL )
1089      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
1090    _Context_Restore_fp( &executing->fp_context );
1091    _Thread_Allocated_fp = executing;
1092  }
1093#else
1094  if ( executing->fp_context != NULL )
1095    _Context_Restore_fp( &executing->fp_context );
1096#endif
1097#endif
1098}
1099
1100/**
1101 * @brief Deallocates the currently loaded floating point context.
1102 *
1103 * This routine is invoked when the currently loaded floating
1104 * point context is no longer associated with an active thread.
1105 */
1106#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1107RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
1108{
1109  _Thread_Allocated_fp = NULL;
1110}
1111#endif
1112
1113/**
1114 * @brief Checks if a thread dispatch is necessary.
1115 *
1116 * This function returns true if a thread dispatch is necessary, and false
1117 * otherwise.
1118 *
1119 * @retval true A thread dispatch is necessary.
1120 * @retval false A thread dispatch is not necessary.
1121 */
1122RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
1123{
1124  return ( _Thread_Dispatch_necessary );
1125}
1126
1127/**
1128 * @brief Gets the maximum number of internal threads.
1129 *
1130 * @return The maximum number of internal threads.
1131 */
1132RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
1133{
1134  /* Idle threads */
1135  uint32_t maximum_internal_threads =
1136    rtems_configuration_get_maximum_processors();
1137
1138  /* MPCI thread */
1139#if defined(RTEMS_MULTIPROCESSING)
1140  if ( _System_state_Is_multiprocessing ) {
1141    ++maximum_internal_threads;
1142  }
1143#endif
1144
1145  return maximum_internal_threads;
1146}
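
/*
 * Worked example (not part of the original header): with four configured
 * processors there are four idle threads; if multiprocessing is configured
 * and active, one MPCI thread is added, giving five internal threads.
 */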
1147
1148/**
1149 * @brief Allocates an internal thread and returns it.
1150 *
1151 * @retval pointer Pointer to the allocated Thread_Control.
1152 * @retval NULL The operation failed.
1153 */
1154RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
1155{
1156  return (Thread_Control *)
1157    _Objects_Allocate_unprotected( &_Thread_Information.Objects );
1158}
1159
1160/**
1161 * @brief Gets the heir of the processor and makes it executing.
1162 *
1163 * Must be called with interrupts disabled.  The thread dispatch necessary
1164 * indicator is cleared as a side-effect.
1165 *
1166 * @param[in, out] cpu_self The processor to get the heir of.
1167 *
1168 * @return The heir thread.
1169 *
1170 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
1171 * _Thread_Dispatch_update_heir().
1172 */
1173RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
1174  Per_CPU_Control *cpu_self
1175)
1176{
1177  Thread_Control *heir;
1178
1179  heir = cpu_self->heir;
1180  cpu_self->dispatch_necessary = false;
1181  cpu_self->executing = heir;
1182
1183  return heir;
1184}
1185
1186/**
1187 * @brief Updates the CPU time used by the thread.
1188 *
1189 * @param[in, out] the_thread The thread to which the CPU time used since the
1190 *      last update is added.
1191 * @param cpu The processor which provides the CPU usage timestamp.
1192 */
1193RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
1194  Thread_Control  *the_thread,
1195  Per_CPU_Control *cpu
1196)
1197{
1198  Timestamp_Control last;
1199  Timestamp_Control ran;
1200
1201  last = cpu->cpu_usage_timestamp;
1202  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
1203  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
1204  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
1205}
1206
1207/**
1208 * @brief Updates the used CPU time of the current heir and installs the new heir.
1209 *
1210 * @param[in, out] cpu_self The current processor.
1211 * @param[in, out] cpu_for_heir The processor on which to install the heir.
1212 * @param heir The new heir for @a cpu_for_heir.
1213 */
1214#if defined( RTEMS_SMP )
1215RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
1216  Per_CPU_Control *cpu_self,
1217  Per_CPU_Control *cpu_for_heir,
1218  Thread_Control  *heir
1219)
1220{
1221  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );
1222
1223  cpu_for_heir->heir = heir;
1224
1225  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
1226}
1227#endif
1228
1229/**
1230 * @brief Gets the used cpu time of the thread and stores it in the given
1231 *      Timestamp_Control.
1232 *
1233 * @param the_thread The thread to get the used cpu time of.
1234 * @param[out] cpu_time_used Stores the used cpu time of @a the_thread.
1235 */
1236void _Thread_Get_CPU_time_used(
1237  Thread_Control    *the_thread,
1238  Timestamp_Control *cpu_time_used
1239);
1240
1241/**
1242 * @brief Initializes the control chain of the action control.
1243 *
1244 * @param[out] action_control The action control to initialize.
1245 */
1246RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
1247  Thread_Action_control *action_control
1248)
1249{
1250  _Chain_Initialize_empty( &action_control->Chain );
1251}
1252
1253/**
1254 * @brief Initializes the Thread action.
1255 *
1256 * @param[out] action The Thread_Action to initialize.
1257 */
1258RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
1259  Thread_Action *action
1260)
1261{
1262  _Chain_Set_off_chain( &action->Node );
1263}
1264
1265/**
1266 * @brief Adds the post switch action to the thread.
1267 *
1268 * The caller shall own the thread state lock.  A thread dispatch is
1269 * requested.
1270 *
1271 * @param[in, out] the_thread is the thread of the action.
1272 *
1273 * @param[in, out] action is the action to add.
1274 *
1275 * @param handler is the handler for the action.
1276 */
1277RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
1278  Thread_Control        *the_thread,
1279  Thread_Action         *action,
1280  Thread_Action_handler  handler
1281)
1282{
1283  Per_CPU_Control *cpu_of_thread;
1284
1285  _Assert( _Thread_State_is_owner( the_thread ) );
1286
1287  cpu_of_thread = _Thread_Get_CPU( the_thread );
1288
1289  action->handler = handler;
1290
1291  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );
1292
1293  _Chain_Append_if_is_off_chain_unprotected(
1294    &the_thread->Post_switch_actions.Chain,
1295    &action->Node
1296  );
1297}
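
/*
 * Illustrative sketch (not part of the original header): requesting a post
 * switch action.  The handler signature and the convention that the handler
 * releases the thread state lock are assumptions based on existing handlers
 * such as _Thread_Life_action_handler().  The action is assumed to have
 * been initialized with _Thread_Action_initialize().
 */
static void _Example_Action_handler(
  Thread_Control   *executing,
  Thread_Action    *action,
  ISR_lock_Context *lock_context
)
{
  (void) action;
  _Thread_State_release( executing, lock_context );

  /* ... perform the deferred work on behalf of the executing thread ... */
}

static void _Example_Request_post_switch_action(
  Thread_Control *the_thread,
  Thread_Action  *action
)
{
  ISR_lock_Context lock_context;

  _Thread_State_acquire( the_thread, &lock_context );
  _Thread_Add_post_switch_action( the_thread, action, _Example_Action_handler );
  _Thread_State_release( the_thread, &lock_context );
}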
1298
1299/**
1300 * @brief Appends the post switch action to the thread.
1301 *
1302 * The caller shall own the thread state lock.  The action shall be inactive.
1303 * The handler of the action shall be already set.  A thread dispatch is not
1304 * requested.
1305 *
1306 * @param[in, out] the_thread is the thread of the action.
1307 *
1308 * @param[in, out] action is the action to add.
1309 */
1310RTEMS_INLINE_ROUTINE void _Thread_Append_post_switch_action(
1311  Thread_Control *the_thread,
1312  Thread_Action  *action
1313)
1314{
1315  _Assert( _Thread_State_is_owner( the_thread ) );
1316  _Assert( action->handler != NULL );
1317
1318  _Chain_Append_unprotected(
1319    &the_thread->Post_switch_actions.Chain,
1320    &action->Node
1321  );
1322}
1323
1324/**
1325 * @brief Checks if the thread life state is restarting.
1326 *
1327 * @param life_state The thread life state for the verification.
1328 *
1329 * @retval true @a life_state is restarting.
1330 * @retval false @a life_state is not restarting.
1331 */
1332RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
1333  Thread_Life_state life_state
1334)
1335{
1336  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
1337}
1338
1339/**
1340 * @brief Checks if the thread life state is terminating.
1341 *
1342 * @param life_state The thread life state for the verification.
1343 *
1344 * @retval true @a life_state is terminating.
1345 * @retval false @a life_state is not terminating.
1346 */
1347RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
1348  Thread_Life_state life_state
1349)
1350{
1351  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
1352}
1353
1354/**
1355 * @brief Checks if the thread life state allows a life change.
1356 *
1357 * @param life_state The thread life state for the verification.
1358 *
1359 * @retval true @a life_state allows life change.
1360 * @retval false @a life_state does not allow life change.
1361 */
1362RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
1363  Thread_Life_state life_state
1364)
1365{
1366  return ( life_state
1367    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
1368}
1369
1370/**
1371 * @brief Checks if the thread life state is life changing.
1372 *
1373 * @param life_state The thread life state for the verification.
1374 *
1375 * @retval true @a life_state is life changing.
1376 * @retval false @a life_state is not life changing.
1377 */
1378RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
1379  Thread_Life_state life_state
1380)
1381{
1382  return ( life_state
1383    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
1384}
1385
1386/**
1387 * @brief Checks if the thread is joinable.
1388 *
1389 * @param the_thread The thread for the verification.
1390 *
1391 * @retval true @a the_thread is joinable.
1392 * @retval false @a the_thread is not joinable.
1393 */
1394RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
1395  const Thread_Control *the_thread
1396)
1397{
1398  _Assert( _Thread_State_is_owner( the_thread ) );
1399  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
1400}
1401
1402/**
1403 * @brief Increments the thread's resource count.
1404 *
1405 * @param[in, out] the_thread The thread to increase the resource count of.
1406 */
1407RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
1408  Thread_Control *the_thread
1409)
1410{
1411#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1412  ++the_thread->resource_count;
1413#else
1414  (void) the_thread;
1415#endif
1416}
1417
1418/**
1419 * @brief Decrements the thread's resource count.
1420 *
1421 * @param[in, out] the_thread The thread to decrement the resource count of.
1422 */
1423RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
1424  Thread_Control *the_thread
1425)
1426{
1427#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1428  --the_thread->resource_count;
1429#else
1430  (void) the_thread;
1431#endif
1432}
1433
1434#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1435/**
1436 * @brief Checks if the thread owns resources.
1437 *
1438 * Resources are accounted with the Thread_Control::resource_count resource
1439 * counter.  This counter is used by mutex objects for example.
1440 *
1441 * @param the_thread The thread.
1442 *
1443 * @retval true The thread owns resources.
1444 * @retval false The thread does not own resources.
1445 */
1446RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
1447  const Thread_Control *the_thread
1448)
1449{
1450  return the_thread->resource_count != 0;
1451}
1452#endif
1453
1454#if defined(RTEMS_SMP)
1455/**
1456 * @brief Cancels the thread's need for help.
1457 *
1458 * @param the_thread The thread to cancel the help request of.
1459 * @param cpu The cpu to get the lock context of in order to
1460 *      cancel the help request.
1461 */
1462RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
1463  Thread_Control  *the_thread,
1464  Per_CPU_Control *cpu
1465)
1466{
1467  ISR_lock_Context lock_context;
1468
1469  _Per_CPU_Acquire( cpu, &lock_context );
1470
1471  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
1472    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
1473    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
1474  }
1475
1476  _Per_CPU_Release( cpu, &lock_context );
1477}
1478#endif
1479
1480/**
1481 * @brief Gets the home scheduler of the thread.
1482 *
1483 * @param the_thread The thread to get the home scheduler of.
1484 *
1485 * @return The thread's home scheduler.
1486 */
1487RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
1488  const Thread_Control *the_thread
1489)
1490{
1491#if defined(RTEMS_SMP)
1492  return the_thread->Scheduler.home_scheduler;
1493#else
1494  (void) the_thread;
1495  return &_Scheduler_Table[ 0 ];
1496#endif
1497}
1498
1499/**
1500 * @brief Gets the home scheduler node of the thread.
1501 *
1502 * @param the_thread The thread to get the home node of.
1503 *
1504 * @return The thread's home node.
1505 */
1506RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
1507  const Thread_Control *the_thread
1508)
1509{
1510#if defined(RTEMS_SMP)
1511  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
1512  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
1513    _Chain_First( &the_thread->Scheduler.Wait_nodes )
1514  );
1515#else
1516  return the_thread->Scheduler.nodes;
1517#endif
1518}
1519
1520/**
1521 * @brief Gets the thread's scheduler node by index.
1522 *
1523 * @param the_thread The thread of which to get a scheduler node.
1524 * @param scheduler_index The index of the desired scheduler node.
1525 *
1526 * @return The scheduler node with the specified index.
1527 */
1528RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
1529  const Thread_Control *the_thread,
1530  size_t                scheduler_index
1531)
1532{
1533#if defined(RTEMS_SMP)
1534  return (Scheduler_Node *)
1535    ( (uintptr_t) the_thread->Scheduler.nodes
1536      + scheduler_index * _Scheduler_Node_size );
1537#else
1538  _Assert( scheduler_index == 0 );
1539  (void) scheduler_index;
1540  return the_thread->Scheduler.nodes;
1541#endif
1542}
1543
1544#if defined(RTEMS_SMP)
1545/**
1546 * @brief Acquires the lock context in a critical section.
1547 *
1548 * @param the_thread The thread to acquire the lock context.
1549 * @param lock_context The lock context.
1550 */
1551RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
1552  Thread_Control   *the_thread,
1553  ISR_lock_Context *lock_context
1554)
1555{
1556  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
1557}
1558
1559/**
1560 * @brief Releases the lock context in a critical section.
1561 *
1562 * @param the_thread The thread to release the lock context.
1563 * @param lock_context The lock context.
1564 */
1565RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
1566  Thread_Control   *the_thread,
1567  ISR_lock_Context *lock_context
1568)
1569{
1570  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
1571}
1572
1573/**
1574 * @brief Processes the thread's scheduler requests.
1575 *
1576 * @param[in, out] the_thread The thread for the operation.
1577 */
1578void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1579
1580/**
1581 * @brief Adds a scheduler request to the thread.
1582 *
1583 * @param[in, out] the_thread The thread to add a scheduler request to.
1584 * @param[in, out] scheduler_node The scheduler node for the request.
1585 * @param request The request to add.
1586 */
1587RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
1588  Thread_Control         *the_thread,
1589  Scheduler_Node         *scheduler_node,
1590  Scheduler_Node_request  request
1591)
1592{
1593  ISR_lock_Context       lock_context;
1594  Scheduler_Node_request current_request;
1595
1596  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );
1597
1598  current_request = scheduler_node->Thread.request;
1599
1600  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
1601    _Assert(
1602      request == SCHEDULER_NODE_REQUEST_ADD
1603        || request == SCHEDULER_NODE_REQUEST_REMOVE
1604    );
1605    _Assert( scheduler_node->Thread.next_request == NULL );
1606    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
1607    the_thread->Scheduler.requests = scheduler_node;
1608  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
1609    _Assert(
1610      ( current_request == SCHEDULER_NODE_REQUEST_ADD
1611        && request == SCHEDULER_NODE_REQUEST_REMOVE )
1612      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
1613        && request == SCHEDULER_NODE_REQUEST_ADD )
1614    );
1615    request = SCHEDULER_NODE_REQUEST_NOTHING;
1616  }
1617
1618  scheduler_node->Thread.request = request;
1619
1620  _Thread_Scheduler_release_critical( the_thread, &lock_context );
1621}
1622
1623/**
1624 * @brief Adds a wait node to the thread and adds a corresponding
1625 *      request to the thread.
1626 *
1627 * @param[in, out] the_thread The thread to add the wait node to.
1628 * @param scheduler_node The scheduler node which provides the wait node.
1629 */
1630RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
1631  Thread_Control *the_thread,
1632  Scheduler_Node *scheduler_node
1633)
1634{
1635  _Chain_Append_unprotected(
1636    &the_thread->Scheduler.Wait_nodes,
1637    &scheduler_node->Thread.Wait_node
1638  );
1639  _Thread_Scheduler_add_request(
1640    the_thread,
1641    scheduler_node,
1642    SCHEDULER_NODE_REQUEST_ADD
1643  );
1644}
1645
1646/**
1647 * @brief Removes a wait node from the thread and adds a corresponding remove
1648 *      request to it.
1649 *
1650 * @param the_thread The thread from which to remove the wait node.
1651 * @param scheduler_node The scheduler node to remove a wait node from.
1652 */
1653RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
1654  Thread_Control *the_thread,
1655  Scheduler_Node *scheduler_node
1656)
1657{
1658  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
1659  _Thread_Scheduler_add_request(
1660    the_thread,
1661    scheduler_node,
1662    SCHEDULER_NODE_REQUEST_REMOVE
1663  );
1664}
1665#endif
1666
1667/**
1668 * @brief Returns the priority of the thread.
1669 *
1670 * Returns the user API and thread wait information relevant thread priority.
1671 * This includes temporary thread priority adjustments due to locking
1672 * protocols, a job release or the POSIX sporadic server for example.
1673 *
1674 * @param the_thread The thread of which to get the priority.
1675 *
1676 * @return The priority of the thread.
1677 */
1678RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1679  const Thread_Control *the_thread
1680)
1681{
1682  Scheduler_Node *scheduler_node;
1683
1684  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1685  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1686}
1687
1688/**
1689 * @brief Returns the unmapped priority of the thread.
1690 *
1691 * @param the_thread The thread of which to get the unmapped priority.
1692 *
1693 * @return The unmapped priority of the thread.
1694 */
1695RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_unmapped_priority(
1696  const Thread_Control *the_thread
1697)
1698{
1699  return SCHEDULER_PRIORITY_UNMAP( _Thread_Get_priority( the_thread ) );
1700}
1701
1702/**
1703 * @brief Returns the unmapped real priority of the thread.
1704 *
1705 * @param the_thread The thread of which to get the unmapped real priority.
1706 *
1707 * @return The unmapped real priority of the thread.
1708 */
1709RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_unmapped_real_priority(
1710  const Thread_Control *the_thread
1711)
1712{
1713  return SCHEDULER_PRIORITY_UNMAP( the_thread->Real_priority.priority );
1714}
1715
1716/**
1717 * @brief Acquires the thread wait default lock inside a critical section
1718 * (interrupts disabled).
1719 *
1720 * @param[in, out] the_thread The thread.
1721 * @param lock_context The lock context used for the corresponding lock
1722 *   release.
1723 *
1724 * @see _Thread_Wait_release_default_critical().
1725 */
1726RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
1727  Thread_Control   *the_thread,
1728  ISR_lock_Context *lock_context
1729)
1730{
1731  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
1732}
1733
1734/**
1735 * @brief Acquires the thread wait default lock and returns the executing
1736 * thread.
1737 *
1738 * @param lock_context The lock context used for the corresponding lock
1739 *   release.
1740 *
1741 * @return The executing thread.
1742 *
1743 * @see _Thread_Wait_release_default().
1744 */
1745RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
1746  ISR_lock_Context *lock_context
1747)
1748{
1749  Thread_Control *executing;
1750
1751  _ISR_lock_ISR_disable( lock_context );
1752  executing = _Thread_Executing;
1753  _Thread_Wait_acquire_default_critical( executing, lock_context );
1754
1755  return executing;
1756}
1757
1758/**
1759 * @brief Acquires the thread wait default lock and disables interrupts.
1760 *
1761 * @param[in, out] the_thread The thread.
1762 * @param[out] lock_context The lock context used for the corresponding lock
1763 *   release.
1764 *
1765 * @see _Thread_Wait_release_default().
1766 */
1767RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
1768  Thread_Control   *the_thread,
1769  ISR_lock_Context *lock_context
1770)
1771{
1772  _ISR_lock_ISR_disable( lock_context );
1773  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
1774}
1775
1776/**
1777 * @brief Releases the thread wait default lock inside a critical section
1778 * (interrupts disabled).
1779 *
1780 * The previous interrupt status is not restored.
1781 *
1782 * @param[in, out] the_thread The thread.
1783 * @param lock_context The lock context used for the corresponding lock
1784 *   acquire.
1785 */
1786RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
1787  Thread_Control   *the_thread,
1788  ISR_lock_Context *lock_context
1789)
1790{
1791  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1792}
1793
1794/**
1795 * @brief Releases the thread wait default lock and restores the previous
1796 * interrupt status.
1797 *
1798 * @param[in, out] the_thread The thread.
1799 * @param[out] lock_context The lock context used for the corresponding lock
1800 *   acquire.
1801 */
1802RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
1803  Thread_Control   *the_thread,
1804  ISR_lock_Context *lock_context
1805)
1806{
1807  _Thread_Wait_release_default_critical( the_thread, lock_context );
1808  _ISR_lock_ISR_enable( lock_context );
1809}
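
/*
 * Illustrative sketch (not part of the original header): reading thread
 * wait information under the thread wait default lock.  The Wait.count
 * member is used only as a stand-in for a field protected by this lock.
 */
static uint32_t _Example_Get_wait_count( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;
  uint32_t         count;

  _Thread_Wait_acquire_default( the_thread, &lock_context );
  count = the_thread->Wait.count;
  _Thread_Wait_release_default( the_thread, &lock_context );

  return count;
}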
1810
1811#if defined(RTEMS_SMP)
1812#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1813  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1814
1815/**
1816 * @brief Removes the wait lock request and opens the next pending one, if any.
1817 *
1818 * @param the_thread The thread to remove the request from.
1819 * @param queue_lock_context The queue lock context.
1820 */
1821RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1822  Thread_Control            *the_thread,
1823  Thread_queue_Lock_context *queue_lock_context
1824)
1825{
1826  Chain_Node *first;
1827
1828  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1829  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1830
1831  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1832    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1833  }
1834}
1835
1836/**
1837 * @brief Acquires the wait queue inside a critical section.
1838 *
 * @param queue The thread queue to acquire.
1840 * @param queue_lock_context The queue lock context.
1841 */
1842RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
1843  Thread_queue_Queue        *queue,
1844  Thread_queue_Lock_context *queue_lock_context
1845)
1846{
1847  _Thread_queue_Queue_acquire_critical(
1848    queue,
1849    &_Thread_Executing->Potpourri_stats,
1850    &queue_lock_context->Lock_context
1851  );
1852}
1853
1854/**
1855 * @brief Releases the wait queue inside a critical section.
1856 *
 * @param queue The thread queue to release.
1858 * @param queue_lock_context The queue lock context.
1859 */
1860RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
1861  Thread_queue_Queue        *queue,
1862  Thread_queue_Lock_context *queue_lock_context
1863)
1864{
1865  _Thread_queue_Queue_release_critical(
1866    queue,
1867    &queue_lock_context->Lock_context
1868  );
1869}
1870#endif
1871
1872/**
1873 * @brief Acquires the thread wait lock inside a critical section (interrupts
1874 * disabled).
1875 *
1876 * @param[in, out] the_thread The thread.
1877 * @param[in, out] queue_context The thread queue context for the corresponding
1878 *   _Thread_Wait_release_critical().
1879 */
1880RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
1881  Thread_Control       *the_thread,
1882  Thread_queue_Context *queue_context
1883)
1884{
1885#if defined(RTEMS_SMP)
1886  Thread_queue_Queue *queue;
1887
1888  _Thread_Wait_acquire_default_critical(
1889    the_thread,
1890    &queue_context->Lock_context.Lock_context
1891  );
1892
1893  queue = the_thread->Wait.queue;
1894  queue_context->Lock_context.Wait.queue = queue;
1895
1896  if ( queue != NULL ) {
1897    _Thread_queue_Gate_add(
1898      &the_thread->Wait.Lock.Pending_requests,
1899      &queue_context->Lock_context.Wait.Gate
1900    );
1901    _Thread_Wait_release_default_critical(
1902      the_thread,
1903      &queue_context->Lock_context.Lock_context
1904    );
1905    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1906
1907    if ( queue_context->Lock_context.Wait.queue == NULL ) {
1908      _Thread_Wait_release_queue_critical(
1909        queue,
1910        &queue_context->Lock_context
1911      );
1912      _Thread_Wait_acquire_default_critical(
1913        the_thread,
1914        &queue_context->Lock_context.Lock_context
1915      );
1916      _Thread_Wait_remove_request_locked(
1917        the_thread,
1918        &queue_context->Lock_context
1919      );
1920      _Assert( the_thread->Wait.queue == NULL );
1921    }
1922  }
1923#else
1924  (void) the_thread;
1925  (void) queue_context;
1926#endif
1927}
1928
1929/**
 * @brief Acquires the thread wait lock and disables interrupts.
1931 *
1932 * @param[in, out] the_thread The thread.
1933 * @param[in, out] queue_context The thread queue context for the corresponding
1934 *   _Thread_Wait_release().
1935 */
1936RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
1937  Thread_Control       *the_thread,
1938  Thread_queue_Context *queue_context
1939)
1940{
1941  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
1942  _Thread_Wait_acquire_critical( the_thread, queue_context );
1943}
1944
1945/**
1946 * @brief Releases the thread wait lock inside a critical section (interrupts
1947 * disabled).
1948 *
1949 * The previous interrupt status is not restored.
1950 *
1951 * @param[in, out] the_thread The thread.
 * @param[in, out] queue_context The thread queue context used for the
 *   corresponding _Thread_Wait_acquire_critical().
1954 */
1955RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
1956  Thread_Control       *the_thread,
1957  Thread_queue_Context *queue_context
1958)
1959{
1960#if defined(RTEMS_SMP)
1961  Thread_queue_Queue *queue;
1962
1963  queue = queue_context->Lock_context.Wait.queue;
1964
1965  if ( queue != NULL ) {
1966    _Thread_Wait_release_queue_critical(
1967      queue, &queue_context->Lock_context
1968    );
1969    _Thread_Wait_acquire_default_critical(
1970      the_thread,
1971      &queue_context->Lock_context.Lock_context
1972    );
1973    _Thread_Wait_remove_request_locked(
1974      the_thread,
1975      &queue_context->Lock_context
1976    );
1977  }
1978
1979  _Thread_Wait_release_default_critical(
1980    the_thread,
1981    &queue_context->Lock_context.Lock_context
1982  );
1983#else
1984  (void) the_thread;
1985  (void) queue_context;
1986#endif
1987}
1988
1989/**
1990 * @brief Releases the thread wait lock and restores the previous interrupt
1991 * status.
1992 *
1993 * @param[in, out] the_thread The thread.
 * @param[in, out] queue_context The thread queue context used for the
 *   corresponding _Thread_Wait_acquire().
1996 */
1997RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
1998  Thread_Control       *the_thread,
1999  Thread_queue_Context *queue_context
2000)
2001{
2002  _Thread_Wait_release_critical( the_thread, queue_context );
2003  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
2004}
2005
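/*
 * Illustrative sketch, not part of the original header: a typical caller
 * brackets an inspection or update of the thread wait state with
 * _Thread_Wait_acquire() and _Thread_Wait_release().  The thread pointer and
 * the context initialization shown here are assumptions of this sketch.
 *
 *   Thread_queue_Context queue_context;
 *
 *   _Thread_queue_Context_initialize( &queue_context );
 *   _Thread_Wait_acquire( the_thread, &queue_context );
 *   ... inspect or update the thread wait state ...
 *   _Thread_Wait_release( the_thread, &queue_context );
 */
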
2006/**
2007 * @brief Claims the thread wait queue.
2008 *
2009 * The caller must not be the owner of the default thread wait lock.  The
2010 * caller must be the owner of the corresponding thread queue lock.  The
2011 * registration of the corresponding thread queue operations is deferred and
2012 * done after the deadlock detection.  This is crucial to support timeouts on
2013 * SMP configurations.
2014 *
2015 * @param[in, out] the_thread The thread.
2016 * @param[in, out] queue The new thread queue.
2017 *
2018 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
2019 */
2020RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
2021  Thread_Control     *the_thread,
2022  Thread_queue_Queue *queue
2023)
2024{
2025  ISR_lock_Context lock_context;
2026
2027  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
2028
2029  _Assert( the_thread->Wait.queue == NULL );
2030
2031#if defined(RTEMS_SMP)
2032  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
2033  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
2034  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
2035#endif
2036
2037  the_thread->Wait.queue = queue;
2038
2039  _Thread_Wait_release_default_critical( the_thread, &lock_context );
2040}
2041
2042/**
2043 * @brief Finalizes the thread wait queue claim via registration of the
2044 * corresponding thread queue operations.
2045 *
2046 * @param[in, out] the_thread The thread.
2047 * @param operations The corresponding thread queue operations.
2048 */
2049RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
2050  Thread_Control                *the_thread,
2051  const Thread_queue_Operations *operations
2052)
2053{
2054  the_thread->Wait.operations = operations;
2055}
2056
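/*
 * Illustrative sketch, not part of the original header: the claim is split
 * from the registration of the operations so that the deadlock detection can
 * run in between.  A simplified enqueue-style sequence (locking details,
 * error handling, and the actual enqueue omitted) looks roughly like this:
 *
 *   _Thread_Wait_claim( executing, queue );
 *   ... run the deadlock detection for queue ...
 *   _Thread_Wait_claim_finalize( executing, operations );
 *   ... block the thread; later a wake-up occurs ...
 *   _Thread_Wait_restore_default( executing );
 *   ... release the thread queue lock ...
 *   _Thread_Wait_tranquilize( executing );
 */
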
2057/**
2058 * @brief Removes a thread wait lock request.
2059 *
2060 * On SMP configurations, removes a thread wait lock request.
2061 *
2062 * On other configurations, this function does nothing.
2063 *
2064 * @param[in, out] the_thread The thread.
2065 * @param[in, out] queue_lock_context The thread queue lock context used for
2066 *   corresponding _Thread_Wait_acquire().
2067 */
2068RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
2069  Thread_Control            *the_thread,
2070  Thread_queue_Lock_context *queue_lock_context
2071)
2072{
2073#if defined(RTEMS_SMP)
2074  ISR_lock_Context lock_context;
2075
2076  _Thread_Wait_acquire_default( the_thread, &lock_context );
2077  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
2078  _Thread_Wait_release_default( the_thread, &lock_context );
2079#else
2080  (void) the_thread;
2081  (void) queue_lock_context;
2082#endif
2083}
2084
2085/**
2086 * @brief Restores the default thread wait queue and operations.
2087 *
2088 * The caller must be the owner of the current thread wait queue lock.
2089 *
2090 * On SMP configurations, the pending requests are updated to use the stale
2091 * thread queue operations.
2092 *
2093 * @param[in, out] the_thread The thread.
2094 *
2095 * @see _Thread_Wait_claim().
2096 */
2097RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
2098  Thread_Control *the_thread
2099)
2100{
2101#if defined(RTEMS_SMP)
2102  ISR_lock_Context  lock_context;
2103  Chain_Node       *node;
2104  const Chain_Node *tail;
2105
2106  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
2107
2108  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
2109  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
2110
2111  if ( node != tail ) {
2112    do {
2113      Thread_queue_Context *queue_context;
2114
2115      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
2116      queue_context->Lock_context.Wait.queue = NULL;
2117
2118      node = _Chain_Next( node );
2119    } while ( node != tail );
2120
2121    _Thread_queue_Gate_add(
2122      &the_thread->Wait.Lock.Pending_requests,
2123      &the_thread->Wait.Lock.Tranquilizer
2124    );
2125  } else {
2126    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
2127  }
2128#endif
2129
2130  the_thread->Wait.queue = NULL;
2131  the_thread->Wait.operations = &_Thread_queue_Operations_default;
2132
2133#if defined(RTEMS_SMP)
2134  _Thread_Wait_release_default_critical( the_thread, &lock_context );
2135#endif
2136}
2137
2138/**
2139 * @brief Tranquilizes the thread after a wait on a thread queue.
2140 *
2141 * After the violent blocking procedure this function makes the thread calm and
2142 * peaceful again so that it can carry out its normal work.
2143 *
 * On SMP configurations, this function ensures that all pending thread wait
 * lock requests have completed before the thread is able to begin a new
 * thread wait procedure.
2146 *
2147 * On other configurations, this function does nothing.
2148 *
 * It must be called exactly once after a _Thread_Wait_claim(),
 *  - after the corresponding thread queue lock has been released, and
 *  - after the default wait state has been restored or some other processor
 *    is about to do this.
2153 *
2154 * @param the_thread The thread.
2155 */
2156RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
2157  Thread_Control *the_thread
2158)
2159{
2160#if defined(RTEMS_SMP)
2161  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
2162#else
2163  (void) the_thread;
2164#endif
2165}
2166
2167/**
2168 * @brief Cancels a thread wait on a thread queue.
2169 *
2170 * @param[in, out] the_thread The thread.
 * @param[in, out] queue_context The thread queue context used for the
 *   corresponding _Thread_Wait_acquire().
2173 */
2174RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
2175  Thread_Control       *the_thread,
2176  Thread_queue_Context *queue_context
2177)
2178{
2179  Thread_queue_Queue *queue;
2180
2181  queue = the_thread->Wait.queue;
2182
2183#if defined(RTEMS_SMP)
2184  if ( queue != NULL ) {
2185    _Assert( queue_context->Lock_context.Wait.queue == queue );
2186#endif
2187
2188    ( *the_thread->Wait.operations->extract )(
2189      queue,
2190      the_thread,
2191      queue_context
2192    );
2193    _Thread_Wait_restore_default( the_thread );
2194
2195#if defined(RTEMS_SMP)
2196    _Assert( queue_context->Lock_context.Wait.queue == NULL );
2197    queue_context->Lock_context.Wait.queue = queue;
2198  }
2199#endif
2200}
2201
2202/**
2203 * @brief The initial thread wait flags value set by _Thread_Initialize().
2204 */
2205#define THREAD_WAIT_FLAGS_INITIAL 0x0U
2206
2207/**
2208 * @brief Mask to get the thread wait state flags.
2209 */
2210#define THREAD_WAIT_STATE_MASK 0xffU
2211
2212/**
2213 * @brief Indicates that the thread begins with the blocking operation.
2214 *
2215 * A blocking operation consists of an optional watchdog initialization and the
2216 * setting of the appropriate thread blocking state with the corresponding
2217 * scheduler block operation.
2218 */
2219#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
2220
2221/**
2222 * @brief Indicates that the thread completed the blocking operation.
2223 */
2224#define THREAD_WAIT_STATE_BLOCKED 0x2U
2225
2226/**
2227 * @brief Indicates that a condition to end the thread wait occurred.
2228 *
 * This could be a timeout, a signal, an event, or a resource becoming
 * available.
2230 */
2231#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
2232
2233/**
2234 * @brief Mask to get the thread wait class flags.
2235 */
2236#define THREAD_WAIT_CLASS_MASK 0xff00U
2237
2238/**
2239 * @brief Indicates that the thread waits for an event.
2240 */
2241#define THREAD_WAIT_CLASS_EVENT 0x100U
2242
2243/**
2244 * @brief Indicates that the thread waits for a system event.
2245 */
2246#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
2247
2248/**
2249 * @brief Indicates that the thread waits for an object.
2250 */
2251#define THREAD_WAIT_CLASS_OBJECT 0x400U
2252
2253/**
2254 * @brief Indicates that the thread waits for a period.
2255 */
2256#define THREAD_WAIT_CLASS_PERIOD 0x800U
2257
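/*
 * Illustrative note, not part of the original header: a thread wait flags
 * value combines one wait class with one wait state, for example
 *
 *   THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
 *
 * for a thread which is about to block on an event receive, and
 *
 *   THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_READY_AGAIN
 *
 * once the event arrived or the wait timed out.
 */
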
2258/**
2259 * @brief Sets the thread's wait flags.
2260 *
2261 * @param[in, out] the_thread The thread to set the wait flags of.
2262 * @param flags The flags to set.
2263 */
2264RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
2265  Thread_Control    *the_thread,
2266  Thread_Wait_flags  flags
2267)
2268{
2269#if defined(RTEMS_SMP)
2270  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
2271#else
2272  the_thread->Wait.flags = flags;
2273#endif
2274}
2275
2276/**
 * @brief Gets the thread's wait flags with the relaxed memory order
 *   (ATOMIC_ORDER_RELAXED).
2278 *
2279 * @param the_thread The thread to get the wait flags of.
2280 *
2281 * @return The thread's wait flags.
2282 */
2283RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
2284  const Thread_Control *the_thread
2285)
2286{
2287#if defined(RTEMS_SMP)
2288  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
2289#else
2290  return the_thread->Wait.flags;
2291#endif
2292}
2293
2294/**
 * @brief Gets the thread's wait flags with the acquire memory order
 *   (ATOMIC_ORDER_ACQUIRE).
2296 *
2297 * @param the_thread The thread to get the wait flags of.
2298 *
2299 * @return The thread's wait flags.
2300 */
2301RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
2302  const Thread_Control *the_thread
2303)
2304{
2305#if defined(RTEMS_SMP)
2306  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
2307#else
2308  return the_thread->Wait.flags;
2309#endif
2310}
2311
2312/**
2313 * @brief Tries to change the thread wait flags with release semantics in case
2314 * of success.
2315 *
2316 * Must be called inside a critical section (interrupts disabled).
2317 *
2318 * In case the wait flags are equal to the expected wait flags, then the wait
2319 * flags are set to the desired wait flags.
2320 *
2321 * @param the_thread The thread.
2322 * @param expected_flags The expected wait flags.
2323 * @param desired_flags The desired wait flags.
2324 *
2325 * @retval true The wait flags were equal to the expected wait flags.
2326 * @retval false The wait flags were not equal to the expected wait flags.
2327 */
2328RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
2329  Thread_Control    *the_thread,
2330  Thread_Wait_flags  expected_flags,
2331  Thread_Wait_flags  desired_flags
2332)
2333{
2334  _Assert( _ISR_Get_level() != 0 );
2335
2336#if defined(RTEMS_SMP)
2337  return _Atomic_Compare_exchange_uint(
2338    &the_thread->Wait.flags,
2339    &expected_flags,
2340    desired_flags,
2341    ATOMIC_ORDER_RELEASE,
2342    ATOMIC_ORDER_RELAXED
2343  );
2344#else
2345  bool success = ( the_thread->Wait.flags == expected_flags );
2346
2347  if ( success ) {
2348    the_thread->Wait.flags = desired_flags;
2349  }
2350
2351  return success;
2352#endif
2353}
2354
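/*
 * Illustrative sketch, not part of the original header: a producer which
 * satisfies the wait condition typically tries to move the waiter directly
 * from the intend-to-block state to the ready-again state; only if the
 * waiter already reached the blocked state is a real unblock necessary.  The
 * event wait class is just an example.
 *
 *   bool success;
 *
 *   success = _Thread_Wait_flags_try_change_release(
 *     the_thread,
 *     THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
 *     THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_READY_AGAIN
 *   );
 *
 *   if ( !success ) {
 *     ... the waiter is blocked, set the flags and unblock it ...
 *   }
 */
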
2355/**
2356 * @brief Tries to change the thread wait flags with acquire semantics.
2357 *
2358 * In case the wait flags are equal to the expected wait flags, then the wait
2359 * flags are set to the desired wait flags.
2360 *
2361 * @param the_thread The thread.
2362 * @param expected_flags The expected wait flags.
2363 * @param desired_flags The desired wait flags.
2364 *
2365 * @retval true The wait flags were equal to the expected wait flags.
2366 * @retval false The wait flags were not equal to the expected wait flags.
2367 */
2368RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
2369  Thread_Control    *the_thread,
2370  Thread_Wait_flags  expected_flags,
2371  Thread_Wait_flags  desired_flags
2372)
2373{
2374#if defined(RTEMS_SMP)
2375  return _Atomic_Compare_exchange_uint(
2376    &the_thread->Wait.flags,
2377    &expected_flags,
2378    desired_flags,
2379    ATOMIC_ORDER_ACQUIRE,
2380    ATOMIC_ORDER_ACQUIRE
2381  );
2382#else
2383  bool      success;
2384  ISR_Level level;
2385
2386  _ISR_Local_disable( level );
2387
2388  success = _Thread_Wait_flags_try_change_release(
2389    the_thread,
2390    expected_flags,
2391    desired_flags
2392  );
2393
2394  _ISR_Local_enable( level );
2395  return success;
2396#endif
2397}
2398
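/*
 * Illustrative sketch, not part of the original header: the blocking thread
 * uses the acquire variant to move from the intend-to-block state to the
 * blocked state after it prepared the blocking operation; a failure means
 * that the wait condition was satisfied in the meantime and the thread must
 * not stay blocked.  The event wait class is again just an example.
 *
 *   bool success;
 *
 *   ... set the thread state to blocked ...
 *
 *   success = _Thread_Wait_flags_try_change_acquire(
 *     executing,
 *     THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
 *     THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_BLOCKED
 *   );
 *
 *   if ( !success ) {
 *     ... the wait was satisfied concurrently, unblock again ...
 *   }
 */
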
2399/**
2400 * @brief Returns the object identifier of the object containing the current
2401 * thread wait queue.
2402 *
2403 * This function may be used for debug and system information purposes.  The
2404 * caller must be the owner of the thread lock.
2405 *
2406 * @param the_thread The thread.
2407 *
2408 * @retval 0 The thread waits on no thread queue currently, the thread wait
2409 *   queue is not contained in an object, or the current thread state provides
2410 *   insufficient information, e.g. the thread is in the middle of a blocking
2411 *   operation.
2412 * @retval other The object identifier of the object containing the thread wait
2413 *   queue.
2414 */
2415Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
2416
2417/**
 * @brief Gets the thread wait return code as a status.
 *
 * @param the_thread The thread to get the wait return code of.
 *
 * @return The thread wait return code converted to a status.
 */
2422RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
2423  const Thread_Control *the_thread
2424)
2425{
2426  return (Status_Control) the_thread->Wait.return_code;
2427}
2428
2429/**
2430 * @brief Cancels a blocking operation so that the thread can continue its
2431 * execution.
2432 *
2433 * In case this function actually cancelled the blocking operation, then the
2434 * thread wait return code is set to the specified status.
2435 *
2436 * A specialization of this function is _Thread_Timeout().
2437 *
2438 * @param[in, out] the_thread The thread.
2439 * @param status The thread wait status.
2440 */
2441void _Thread_Continue( Thread_Control *the_thread, Status_Control status );
2442
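/*
 * Illustrative sketch, not part of the original header: a wake-up source may
 * cancel a blocking operation with a status of its choice, for example
 *
 *   _Thread_Continue( the_thread, STATUS_TIMEOUT );
 *
 * where STATUS_TIMEOUT stands in for any suitable status.
 */
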
2443/**
2444 * @brief General purpose thread wait timeout.
2445 *
2446 * @param the_watchdog The thread timer watchdog.
2447 */
2448void _Thread_Timeout( Watchdog_Control *the_watchdog );
2449
2450/**
2451 * @brief Initializes the thread timer.
2452 *
 * @param[in, out] timer The timer to initialize.
2454 * @param cpu The cpu for the operation.
2455 */
2456RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
2457  Thread_Timer_information *timer,
2458  Per_CPU_Control          *cpu
2459)
2460{
2461  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
2462  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
2463  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
2464}
2465
2466/**
 * @brief Arms the thread timer to call _Thread_Timeout() after the specified
 *   number of clock ticks.
 *
 * @param[in, out] the_thread The thread to arm the timeout for.
 * @param cpu The cpu for the operation.
 * @param ticks The number of clock ticks until the timeout expires.
2472 */
2473RTEMS_INLINE_ROUTINE void _Thread_Add_timeout_ticks(
2474  Thread_Control    *the_thread,
2475  Per_CPU_Control   *cpu,
2476  Watchdog_Interval  ticks
2477)
2478{
2479  ISR_lock_Context lock_context;
2480
2481  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2482
2483  the_thread->Timer.header =
2484    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
2485  the_thread->Timer.Watchdog.routine = _Thread_Timeout;
2486  _Watchdog_Per_CPU_insert_ticks( &the_thread->Timer.Watchdog, cpu, ticks );
2487
2488  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2489}
2490
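/*
 * Illustrative sketch, not part of the original header: arming a relative
 * timeout of 100 clock ticks for a thread on the current processor; the tick
 * count and the use of _Per_CPU_Get() are assumptions of this sketch.
 *
 *   _Thread_Add_timeout_ticks( the_thread, _Per_CPU_Get(), 100 );
 */
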
2491/**
 * @brief Inserts the thread timer watchdog into the cpu's realtime watchdog
 *   header.
 *
 * @param[in, out] the_thread The thread for the operation.
 * @param cpu The cpu to get the watchdog header from.
 * @param routine The watchdog routine for the thread.
 * @param expire The expiration time for the watchdog.
2498 */
2499RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_realtime(
2500  Thread_Control                 *the_thread,
2501  Per_CPU_Control                *cpu,
2502  Watchdog_Service_routine_entry  routine,
2503  uint64_t                        expire
2504)
2505{
2506  ISR_lock_Context  lock_context;
2507  Watchdog_Header  *header;
2508
2509  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2510
2511  header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
2512  the_thread->Timer.header = header;
2513  the_thread->Timer.Watchdog.routine = routine;
2514  _Watchdog_Per_CPU_insert( &the_thread->Timer.Watchdog, cpu, header, expire );
2515
2516  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2517}
2518
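/*
 * Illustrative sketch, not part of the original header: arming an absolute
 * CLOCK_REALTIME timeout with the general purpose _Thread_Timeout() routine.
 * The conversion of a struct timespec to the 64-bit watchdog format via
 * _Watchdog_Ticks_from_timespec() is an assumption of this sketch.
 *
 *   _Thread_Timer_insert_realtime(
 *     the_thread,
 *     _Per_CPU_Get(),
 *     _Thread_Timeout,
 *     _Watchdog_Ticks_from_timespec( abstime )
 *   );
 */
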
2519/**
 * @brief Removes the watchdog timer from the thread.
2521 *
2522 * @param[in, out] the_thread The thread to remove the watchdog from.
2523 */
2524RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
2525{
2526  ISR_lock_Context lock_context;
2527
2528  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2529
2530  _Watchdog_Per_CPU_remove(
2531    &the_thread->Timer.Watchdog,
2532#if defined(RTEMS_SMP)
2533    the_thread->Timer.Watchdog.cpu,
2534#else
2535    _Per_CPU_Get(),
2536#endif
2537    the_thread->Timer.header
2538  );
2539
2540  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2541}
2542
2543/**
 * @brief Removes the watchdog timer from the thread and unblocks it if
 *   necessary.
 *
 * @param[in, out] the_thread The thread to remove the watchdog from and
 *   unblock if necessary.
2548 * @param queue The thread queue.
2549 */
2550RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
2551  Thread_Control     *the_thread,
2552  Thread_queue_Queue *queue
2553)
2554{
2555  _Thread_Wait_tranquilize( the_thread );
2556  _Thread_Timer_remove( the_thread );
2557
2558#if defined(RTEMS_MULTIPROCESSING)
2559  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
2560    _Thread_Unblock( the_thread );
2561  } else {
2562    _Thread_queue_Unblock_proxy( queue, the_thread );
2563  }
2564#else
2565  (void) queue;
2566  _Thread_Unblock( the_thread );
2567#endif
2568}
2569
2570/**
2571 * @brief Sets the name of the thread.
2572 *
2573 * @param[out] the_thread  The thread to change the name of.
2574 * @param name The new name for the thread.
2575 *
2576 * @retval STATUS_SUCCESSFUL The operation succeeded.
2577 * @retval STATUS_RESULT_TOO_LARGE The name was too long.
2578 */
2579Status_Control _Thread_Set_name(
2580  Thread_Control *the_thread,
2581  const char     *name
2582);
2583
2584/**
2585 * @brief Gets the name of the thread.
2586 *
2587 * @param the_thread The thread to get the name of.
 * @param[out] buffer The buffer to return the thread's name in.
2589 * @param buffer_size The size of @a buffer.
2590 *
2591 * @return The number of bytes copied to @a buffer.
2592 */
2593size_t _Thread_Get_name(
2594  const Thread_Control *the_thread,
2595  char                 *buffer,
2596  size_t                buffer_size
2597);
2598
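/*
 * Illustrative sketch, not part of the original header: copying the name of
 * the executing thread into a local buffer; the buffer size is arbitrary.
 *
 *   char name[ 32 ];
 *
 *   (void) _Thread_Get_name( _Thread_Get_executing(), name, sizeof( name ) );
 */
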
2599#if defined(RTEMS_SMP)
2600#define THREAD_PIN_STEP 2
2601
2602#define THREAD_PIN_PREEMPTION 1
2603
2604/**
2605 * @brief Unpins the thread.
2606 *
2607 * @param executing The currently executing thread.
2608 * @param cpu_self The cpu for the operation.
2609 */
2610void _Thread_Do_unpin(
2611  Thread_Control  *executing,
2612  Per_CPU_Control *cpu_self
2613);
2614#endif
2615
2616/**
 * @brief Pins the executing thread.
2618 *
2619 * @param executing The currently executing thread.
2620 */
2621RTEMS_INLINE_ROUTINE void _Thread_Pin( Thread_Control *executing )
2622{
2623#if defined(RTEMS_SMP)
2624  _Assert( executing == _Thread_Executing );
2625
2626  executing->Scheduler.pin_level += THREAD_PIN_STEP;
2627#else
2628  (void) executing;
2629#endif
2630}
2631
2632/**
2633 * @brief Unpins the thread.
2634 *
2635 * @param executing The currently executing thread.
2636 * @param cpu_self The cpu for the operation.
2637 */
2638RTEMS_INLINE_ROUTINE void _Thread_Unpin(
2639  Thread_Control  *executing,
2640  Per_CPU_Control *cpu_self
2641)
2642{
2643#if defined(RTEMS_SMP)
2644  unsigned int pin_level;
2645
2646  _Assert( executing == _Thread_Executing );
2647
2648  pin_level = executing->Scheduler.pin_level;
2649  _Assert( pin_level > 0 );
2650
2651  if (
2652    RTEMS_PREDICT_TRUE(
2653      pin_level != ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION )
2654    )
2655  ) {
2656    executing->Scheduler.pin_level = pin_level - THREAD_PIN_STEP;
2657  } else {
2658    _Thread_Do_unpin( executing, cpu_self );
2659  }
2660#else
2661  (void) executing;
2662  (void) cpu_self;
2663#endif
2664}
2665
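/*
 * Illustrative sketch, not part of the original header: pinning keeps the
 * executing thread on its current processor until the matching unpin.  The
 * bracketing with _Thread_Dispatch_disable()/_Thread_Dispatch_enable() around
 * the unpin is an assumption of this sketch.
 *
 *   Per_CPU_Control *cpu_self;
 *
 *   _Thread_Pin( _Thread_Get_executing() );
 *   ... do work which must stay on this processor ...
 *   cpu_self = _Thread_Dispatch_disable();
 *   _Thread_Unpin( _Thread_Get_executing(), cpu_self );
 *   _Thread_Dispatch_enable( cpu_self );
 */
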
2666/** @}*/
2667
2668#ifdef __cplusplus
2669}
2670#endif
2671
2672#if defined(RTEMS_MULTIPROCESSING)
2673#include <rtems/score/threadmp.h>
2674#endif
2675
2676#endif
2677/* end of include file */