source: rtems/cpukit/include/rtems/score/threadimpl.h @ 5803f37

Last change on this file since 5803f37 was 5803f37, checked in by Sebastian Huber <sebastian.huber@…>, on 06/28/19 at 06:30:11

score: Add and use _Thread_Get_unmapped_priority().

Add and use _Thread_Get_unmapped_real_priority().

1/**
2 * @file
3 *
4 * @ingroup RTEMSScoreThread
5 *
6 * @brief Inlined Routines from the Thread Handler
7 *
8 * This file contains the macro implementation of the inlined
9 * routines from the Thread handler.
10 */
11
12/*
13 *  COPYRIGHT (c) 1989-2008.
14 *  On-Line Applications Research Corporation (OAR).
15 *
16 *  Copyright (c) 2014, 2017 embedded brains GmbH.
17 *
18 *  The license and distribution terms for this file may be
19 *  found in the file LICENSE in this distribution or at
20 *  http://www.rtems.org/license/LICENSE.
21 */
22
23#ifndef _RTEMS_SCORE_THREADIMPL_H
24#define _RTEMS_SCORE_THREADIMPL_H
25
26#include <rtems/score/thread.h>
27#include <rtems/score/assert.h>
28#include <rtems/score/chainimpl.h>
29#include <rtems/score/interr.h>
30#include <rtems/score/isr.h>
31#include <rtems/score/objectimpl.h>
32#include <rtems/score/schedulernodeimpl.h>
33#include <rtems/score/statesimpl.h>
34#include <rtems/score/status.h>
35#include <rtems/score/sysstate.h>
36#include <rtems/score/timestampimpl.h>
37#include <rtems/score/threadqimpl.h>
38#include <rtems/score/todimpl.h>
39#include <rtems/score/watchdogimpl.h>
40#include <rtems/config.h>
41
42#ifdef __cplusplus
43extern "C" {
44#endif
45
46/**
47 * @addtogroup RTEMSScoreThread
48 *
49 * @{
50 */
51
52/**
53 *  The following status code indicates that a proxy is blocking while it
54 *  waits for a resource.
55 */
56#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
57
58/**
59 *  Self for the GNU Ada Run-Time
60 */
61extern void *rtems_ada_self;
62
63/**
64 * @brief Object identifier of the global constructor thread.
65 *
66 * This variable is set by _RTEMS_tasks_Initialize_user_tasks_body() or
67 * _POSIX_Threads_Initialize_user_threads_body().
68 *
69 * It is consumed by _Thread_Handler().
70 */
71extern Objects_Id _Thread_Global_constructor;
72
73/**
74 *  The following points to the thread whose floating point
75 *  context is currently loaded.
76 */
77#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
78extern Thread_Control *_Thread_Allocated_fp;
79#endif
80
81#if defined(RTEMS_SMP)
82#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
83  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
84#endif
85
86typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
87
88/**
89 * @brief Calls the visitor with each thread and the given argument until the
90 *      visitor indicates that it is done.
91 *
92 * @param visitor Function that gets a thread and @a arg as parameters and
93 *      returns true if the iteration is done.
94 * @param arg Parameter for @a visitor
95 */
96void _Thread_Iterate(
97  Thread_Visitor  visitor,
98  void           *arg
99);
100
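/*
 * Usage sketch for _Thread_Iterate() (illustrative only): a visitor which
 * counts all threads in the system.  The names _Example_count_visitor() and
 * _Example_count_threads() are hypothetical and not part of this API.
 */
static bool _Example_count_visitor( Thread_Control *the_thread, void *arg )
{
  uint32_t *count;

  (void) the_thread;
  count = arg;
  ++( *count );

  /* Return false to continue the iteration with the next thread */
  return false;
}

static uint32_t _Example_count_threads( void )
{
  uint32_t count;

  count = 0;
  _Thread_Iterate( _Example_count_visitor, &count );

  return count;
}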
101/**
102 * @brief Initializes the thread information
103 *
104 * @param[out] information Information to initialize.
105 */
106void _Thread_Initialize_information( Thread_Information *information );
107
108/**
109 * @brief Initializes thread handler.
110 *
111 * This routine performs the initialization necessary for this handler.
112 */
113void _Thread_Handler_initialization(void);
114
115/**
116 * @brief Creates idle thread.
117 *
118 * This routine creates the idle thread.
119 *
120 * @warning No thread should be created before this one.
121 */
122void _Thread_Create_idle(void);
123
124/**
125 * @brief Starts thread multitasking.
126 *
127 * This routine initiates multitasking.  It is invoked only as
128 * part of initialization and its invocation is the last act of
129 * the non-multitasking part of the system initialization.
130 */
131void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
132
133/**
134 * @brief Allocates the requested stack space for the thread.
135 *
136 * Allocate the requested stack space for the thread.
137 * Set the Start.stack field to the address of the stack.
138 *
139 * @param[out] the_thread The thread where the stack space is requested.
140 * @param stack_size The stack space that is requested.
141 *
142 * @retval actual Size allocated after any adjustment.
143 * @retval zero The allocation failed.
144 */
145size_t _Thread_Stack_Allocate(
146  Thread_Control *the_thread,
147  size_t          stack_size
148);
149
150/**
151 * @brief Deallocates thread stack.
152 *
153 * Deallocate the Thread's stack.
154 *
155 * @param[out] the_thread The thread to deallocate the stack of.
156 */
157void _Thread_Stack_Free(
158  Thread_Control *the_thread
159);
160
161/**
162 * @brief Initializes thread.
163 *
164 * This routine initializes the specified thread.  It allocates
165 * all memory associated with this thread.  It completes by adding
166 * the thread to the local object table so operations on this
167 * thread id are allowed.
168 *
169 * @note If stack_area is NULL, it is allocated from the workspace.
170 *
171 * @note If the stack is allocated from the workspace, then it is
172 *       guaranteed to be of at least minimum size.
173 *
174 * @param information The thread information.
175 * @param[out] the_thread The thread to initialize.
176 * @param scheduler The scheduler control instance for the thread.
177 * @param stack_area The starting address of the thread area.
178 * @param stack_size The size of the thread area in bytes.
179 * @param is_fp Indicates whether the thread needs a floating point area.
180 * @param priority The new thread's priority.
181 * @param is_preemptible Indicates whether the new thread is preemptible.
182 * @param budget_algorithm The thread's budget algorithm.
183 * @param budget_callout The thread's initial budget callout.
184 * @param isr_level The thread's initial isr level.
185 * @param name Name of the object for the thread.
186 *
187 * @retval true The thread initialization was successful.
188 * @retval false The thread initialization failed.
189 */
190bool _Thread_Initialize(
191  Thread_Information                   *information,
192  Thread_Control                       *the_thread,
193  const struct _Scheduler_Control      *scheduler,
194  void                                 *stack_area,
195  size_t                                stack_size,
196  bool                                  is_fp,
197  Priority_Control                      priority,
198  bool                                  is_preemptible,
199  Thread_CPU_budget_algorithms          budget_algorithm,
200  Thread_CPU_budget_algorithm_callout   budget_callout,
201  uint32_t                              isr_level,
202  Objects_Name                          name
203);
204
205/**
206 * @brief Starts the thread and makes it ready to execute.
207 *
208 * This routine initializes the executable information for a thread
209 * and makes it ready to execute.  After this routine executes, the
210 * thread competes with all other threads for CPU time.
211 *
212 * @param the_thread The thread to be started.
213 * @param entry The thread entry information.
214 */
215bool _Thread_Start(
216  Thread_Control                 *the_thread,
217  const Thread_Entry_information *entry,
218  ISR_lock_Context               *lock_context
219);
220
221/**
222 * @brief Restarts the currently executing thread.
223 *
224 * @param[in, out] executing The currently executing thread.
225 * @param entry The start entry information for @a executing.
226 * @param lock_context The lock context.
227 */
228void _Thread_Restart_self(
229  Thread_Control                 *executing,
230  const Thread_Entry_information *entry,
231  ISR_lock_Context               *lock_context
232) RTEMS_NO_RETURN;
233
234/**
235 * @brief Restarts the thread.
236 *
237 * @param[in, out] the_thread The thread to restart.
238 * @param entry The start entry information for @a the_thread.
239 * @param lock_context The lock context.
240 *
241 * @retval true The operation was successful.
242 * @retval false The operation failed.
243 */
244bool _Thread_Restart_other(
245  Thread_Control                 *the_thread,
246  const Thread_Entry_information *entry,
247  ISR_lock_Context               *lock_context
248);
249
250/**
251 * @brief Yields the currently executing thread.
252 *
253 * @param[in, out] executing The thread that performs a yield.
254 */
255void _Thread_Yield( Thread_Control *executing );
256
257/**
258 * @brief Changes the life state of the currently executing thread with the given sets.
259 *
260 * @param clear States to clear.
261 * @param set States to set.
262 * @param ignore States to ignore.
263 *
264 * @return The previous life state of the thread.
265 */
266Thread_Life_state _Thread_Change_life(
267  Thread_Life_state clear,
268  Thread_Life_state set,
269  Thread_Life_state ignore
270);
271
272/**
273 * @brief Sets the life protection state of the currently executing thread.
274 *
275 * Calls _Thread_Change_life() with THREAD_LIFE_PROTECTED as the state to clear
276 * and the bitwise AND of @a state and THREAD_LIFE_PROTECTED as the state to set.
277 *
278 * @param state The states to set.
279 *
280 * @return The previous state the thread was in.
281 */
282Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
283
284/**
285 * @brief Kills all zombie threads in the system.
286 *
287 * Threads change into the zombie state as the last step in the thread
288 * termination sequence right before a context switch to the heir thread is
289 * initiated.  Since the thread stack is still in use during this phase we have
290 * to postpone the thread stack reclamation until this point.  On SMP
291 * configurations we may have to busy wait for context switch completion here.
292 */
293void _Thread_Kill_zombies( void );
294
295/**
296 * @brief Exits the currently executing thread.
297 *
298 * @param[in, out] executing The currently executing thread.
299 * @param set The states to set.
300 * @param[out] exit_value Contains the exit value of the thread.
301 */
302void _Thread_Exit(
303  Thread_Control    *executing,
304  Thread_Life_state  set,
305  void              *exit_value
306);
307
308/**
309 * @brief Lets the currently executing thread wait (join) for the given
310 *      thread.
311 *
312 * @param[in, out] the_thread The thread to wait for.
313 * @param waiting_for_join The states control for the join.
314 * @param[in, out] executing The currently executing thread.
315 * @param queue_context The thread queue context.
316 */
317void _Thread_Join(
318  Thread_Control       *the_thread,
319  States_Control        waiting_for_join,
320  Thread_Control       *executing,
321  Thread_queue_Context *queue_context
322);
323
324/**
325 * @brief Cancels the thread.
326 *
327 * @param[in, out] the_thread The thread to cancel.
328 * @param executing The currently executing thread.
329 * @param exit_value The exit value for the thread.
330 */
331void _Thread_Cancel(
332  Thread_Control *the_thread,
333  Thread_Control *executing,
334  void           *exit_value
335);
336
337typedef struct {
338  Thread_queue_Context  Base;
339  Thread_Control       *cancel;
340} Thread_Close_context;
341
342/**
343 * @brief Closes the thread.
344 *
345 * Closes the thread object and starts the thread termination sequence.  In
346 * case the executing thread is not the thread to terminate, this function
347 * waits until the terminating thread has reached the zombie state.
348 *
349 * @param the_thread The thread to close.
350 * @param executing The currently executing thread.
351 * @param[in, out] context The thread close context.
352 */
353void _Thread_Close(
354  Thread_Control       *the_thread,
355  Thread_Control       *executing,
356  Thread_Close_context *context
357);
358
359/**
360 * @brief Checks if the thread is ready.
361 *
362 * @param the_thread The thread to check if it is ready.
363 *
364 * @retval true The thread is currently in the ready state.
365 * @retval false The thread is currently not ready.
366 */
367RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
368{
369  return _States_Is_ready( the_thread->current_state );
370}
371
372/**
373 * @brief Clears the specified thread state without locking the lock context.
374 *
375 * In the case the previous state is a non-ready state and the next state is
376 * the ready state, then the thread is unblocked by the scheduler.
377 *
378 * @param[in, out] the_thread The thread.
379 * @param state The state to clear.  It must not be zero.
380 *
381 * @return The thread's previous state.
382 */
383States_Control _Thread_Clear_state_locked(
384  Thread_Control *the_thread,
385  States_Control  state
386);
387
388/**
389 * @brief Clears the specified thread state.
390 *
391 * In the case the previous state is a non-ready state and the next state is
392 * the ready state, then the thread is unblocked by the scheduler.
393 *
394 * @param[in, out] the_thread The thread.
395 * @param state The state to clear.  It must not be zero.
396 *
397 * @return The previous state.
398 */
399States_Control _Thread_Clear_state(
400  Thread_Control *the_thread,
401  States_Control  state
402);
403
404/**
405 * @brief Sets the specified thread state without locking the lock context.
406 *
407 * In the case the previous state is the ready state, then the thread is blocked
408 * by the scheduler.
409 *
410 * @param[in, out] the_thread The thread.
411 * @param state The state to set.  It must not be zero.
412 *
413 * @return The previous state.
414 */
415States_Control _Thread_Set_state_locked(
416  Thread_Control *the_thread,
417  States_Control  state
418);
419
420/**
421 * @brief Sets the specified thread state.
422 *
423 * In the case the previous state is the ready state, then the thread is blocked
424 * by the scheduler.
425 *
426 * @param[in, out] the_thread The thread.
427 * @param state The state to set.  It must not be zero.
428 *
429 * @return The previous state.
430 */
431States_Control _Thread_Set_state(
432  Thread_Control *the_thread,
433  States_Control  state
434);
435
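/*
 * Usage sketch (illustrative only): block a thread by setting a blocking
 * state and later make it ready again by clearing the same state.  The
 * function names are hypothetical.
 */
static void _Example_suspend( Thread_Control *the_thread )
{
  /* The scheduler blocks the thread if it was ready before */
  (void) _Thread_Set_state( the_thread, STATES_SUSPENDED );
}

static void _Example_resume( Thread_Control *the_thread )
{
  /* The scheduler unblocks the thread if no other blocking states remain */
  (void) _Thread_Clear_state( the_thread, STATES_SUSPENDED );
}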
436/**
437 * @brief Initializes the environment for a thread.
438 *
439 * This routine initializes the context of @a the_thread to its
440 * appropriate starting state.
441 *
442 * @param[in, out] the_thread The pointer to the thread control block.
443 */
444void _Thread_Load_environment(
445  Thread_Control *the_thread
446);
447
448/**
449 * @brief Calls the entry of a thread with the idle start kind.
450 *
451 * @param executing The currently executing thread.
452 */
453void _Thread_Entry_adaptor_idle( Thread_Control *executing );
454
455/**
456 * @brief Calls the entry of a thread with the numeric start kind.
457 *
458 * @param executing The currently executing thread.
459 */
460void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
461
462/**
463 * @brief Calls the entry of a thread with the pointer start kind.
464 *
465 * Stores the return value in the Wait.return_argument of the thread.
466 *
467 * @param executing The currently executing thread.
468 */
469void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
470
471/**
472 * @brief Wrapper function for all threads.
473 *
474 * This routine is the wrapper function for all threads.  It is
475 * the starting point for all threads.  The user provided thread
476 * entry point is invoked by this routine.  Operations
477 * which must be performed immediately before and after the user's
478 * thread executes are found here.
479 *
480 * @note On entry, it is assumed all interrupts are blocked and that this
481 * routine needs to set the initial isr level.  This may or may not
482 * actually be needed by the context switch routine and as a result
483 * interrupts may already be at their proper level.  Either way,
484 * setting the initial isr level properly here is safe.
485 */
486void _Thread_Handler( void );
487
488/**
489 * @brief Acquires the thread state lock inside a critical section.
490 *
491 * @param the_thread The thread whose state lock is acquired.
492 * @param lock_context The lock context.
493 */
494RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
495  Thread_Control   *the_thread,
496  ISR_lock_Context *lock_context
497)
498{
499  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
500}
501
502/**
503 * @brief Disables interrupts and acquires the thread state lock.
504 *
505 * @param the_thread The thread whose state lock is acquired.
506 * @param lock_context The lock context.
507 */
508RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
509  Thread_Control   *the_thread,
510  ISR_lock_Context *lock_context
511)
512{
513  _ISR_lock_ISR_disable( lock_context );
514  _Thread_State_acquire_critical( the_thread, lock_context );
515}
516
517/**
518 * @brief Disables interrupts and acquires the lock context for the currently
519 *      executing thread.
520 *
521 * @param lock_context The lock context.
522 *
523 * @return The currently executing thread.
524 */
525RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
526  ISR_lock_Context *lock_context
527)
528{
529  Thread_Control *executing;
530
531  _ISR_lock_ISR_disable( lock_context );
532  executing = _Thread_Executing;
533  _Thread_State_acquire_critical( executing, lock_context );
534
535  return executing;
536}
537
538/**
539 * @brief Releases the thread state lock inside a critical section.
540 *
541 * @param the_thread The thread whose state lock is released.
542 * @param lock_context The lock context.
543 */
544RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
545  Thread_Control   *the_thread,
546  ISR_lock_Context *lock_context
547)
548{
549  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
550}
551
552/**
553 * @brief Releases the thread state lock and enables interrupts.
554 *
555 * @param[in, out] the_thread The thread whose state lock is released.
556 * @param[out] lock_context The lock context.
557 */
558RTEMS_INLINE_ROUTINE void _Thread_State_release(
559  Thread_Control   *the_thread,
560  ISR_lock_Context *lock_context
561)
562{
563  _Thread_State_release_critical( the_thread, lock_context );
564  _ISR_lock_ISR_enable( lock_context );
565}
566
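/*
 * Usage sketch (illustrative only): read the current thread state under the
 * protection of the thread state lock.  The function name is hypothetical.
 */
static States_Control _Example_get_current_state( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;
  States_Control   current_state;

  _Thread_State_acquire( the_thread, &lock_context );
  current_state = the_thread->current_state;
  _Thread_State_release( the_thread, &lock_context );

  return current_state;
}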
567/**
568 * @brief Checks if the thread is the owner of the lock of the join queue.
569 *
570 * @param the_thread The thread for the verification.
571 *
572 * @retval true The thread is the owner of the lock of the join queue.
573 * @retval false The thread is not the owner of the lock of the join queue.
574 */
575#if defined(RTEMS_DEBUG)
576RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
577  const Thread_Control *the_thread
578)
579{
580  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
581}
582#endif
583
584/**
585 * @brief Performs the priority actions specified by the thread queue context
586 * along the thread queue path.
587 *
588 * The caller must be the owner of the thread wait lock.
589 *
590 * @param start_of_path The start thread of the thread queue path.
591 * @param queue_context The thread queue context specifying the thread queue
592 *   path and initial thread priority actions.
593 *
594 * @see _Thread_queue_Path_acquire_critical().
595 */
596void _Thread_Priority_perform_actions(
597  Thread_Control       *start_of_path,
598  Thread_queue_Context *queue_context
599);
600
601/**
602 * @brief Adds the specified thread priority node to the corresponding thread
603 * priority aggregation.
604 *
605 * The caller must be the owner of the thread wait lock.
606 *
607 * @param the_thread The thread.
608 * @param priority_node The thread priority node to add.
609 * @param queue_context The thread queue context to return an updated set of
610 *   threads for _Thread_Priority_update().  The thread queue context must be
611 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
612 *   call of this function.
613 *
614 * @see _Thread_Wait_acquire().
615 */
616void _Thread_Priority_add(
617  Thread_Control       *the_thread,
618  Priority_Node        *priority_node,
619  Thread_queue_Context *queue_context
620);
621
622/**
623 * @brief Removes the specified thread priority node from the corresponding
624 * thread priority aggregation.
625 *
626 * The caller must be the owner of the thread wait lock.
627 *
628 * @param the_thread The thread.
629 * @param priority_node The thread priority node to remove.
630 * @param queue_context The thread queue context to return an updated set of
631 *   threads for _Thread_Priority_update().  The thread queue context must be
632 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
633 *   call of this function.
634 *
635 * @see _Thread_Wait_acquire().
636 */
637void _Thread_Priority_remove(
638  Thread_Control       *the_thread,
639  Priority_Node        *priority_node,
640  Thread_queue_Context *queue_context
641);
642
643/**
644 * @brief Propagates a thread priority value change in the specified thread
645 * priority node to the corresponding thread priority aggregation.
646 *
647 * The caller must be the owner of the thread wait lock.
648 *
649 * @param the_thread The thread.
650 * @param[out] priority_node The thread priority node to change.
651 * @param prepend_it In case this is true, then the thread is prepended to
652 *   its priority group in its home scheduler instance, otherwise it is
653 *   appended.
654 * @param queue_context The thread queue context to return an updated set of
655 *   threads for _Thread_Priority_update().  The thread queue context must be
656 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
657 *   call of this function.
658 *
659 * @see _Thread_Wait_acquire().
660 */
661void _Thread_Priority_changed(
662  Thread_Control       *the_thread,
663  Priority_Node        *priority_node,
664  bool                  prepend_it,
665  Thread_queue_Context *queue_context
666);
667
668/**
669 * @brief Changes the thread priority value of the specified thread priority
670 * node in the corresponding thread priority aggregation.
671 *
672 * The caller must be the owner of the thread wait lock.
673 *
674 * @param the_thread The thread.
675 * @param[out] priority_node The thread priority node to change.
676 * @param new_priority The new thread priority value of the thread priority
677 *   node to change.
678 * @param prepend_it In case this is true, then the thread is prepended to
679 *   its priority group in its home scheduler instance, otherwise it is
680 *   appended.
681 * @param queue_context The thread queue context to return an updated set of
682 *   threads for _Thread_Priority_update().  The thread queue context must be
683 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
684 *   call of this function.
685 *
686 * @see _Thread_Wait_acquire().
687 */
688RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
689  Thread_Control       *the_thread,
690  Priority_Node        *priority_node,
691  Priority_Control      new_priority,
692  bool                  prepend_it,
693  Thread_queue_Context *queue_context
694)
695{
696  _Priority_Node_set_priority( priority_node, new_priority );
697  _Thread_Priority_changed(
698    the_thread,
699    priority_node,
700    prepend_it,
701    queue_context
702  );
703}
704
705/**
706 * @brief Replaces the victim priority node with the replacement priority node
707 * in the corresponding thread priority aggregation.
708 *
709 * The caller must be the owner of the thread wait lock.
710 *
711 * @param the_thread The thread.
712 * @param victim_node The victim thread priority node.
713 * @param replacement_node The replacement thread priority node.
714 *
715 * @see _Thread_Wait_acquire().
716 */
717void _Thread_Priority_replace(
718  Thread_Control *the_thread,
719  Priority_Node  *victim_node,
720  Priority_Node  *replacement_node
721);
722
723/**
724 * @brief Updates the priority of all threads in the set
725 *
726 * @param queue_context The thread queue context to return an updated set of
727 *   threads for _Thread_Priority_update().  The thread queue context must be
728 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
729 *   call of this function.
730 *
731 * @see _Thread_Priority_add(), _Thread_Priority_change(),
732 *   _Thread_Priority_changed() and _Thread_Priority_remove().
733 */
734void _Thread_Priority_update( Thread_queue_Context *queue_context );
735
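/*
 * Usage sketch (illustrative only): change the real priority of a thread
 * according to the protocol described above.  The thread queue context is
 * cleared first, the thread wait lock is held across the change, and the
 * scheduler is notified afterwards via _Thread_Priority_update().  The
 * function name is hypothetical; in real code this sketch would live in a
 * source file which includes this header, since it uses inline routines
 * defined further below.
 */
static void _Example_set_real_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  Thread_queue_Context queue_context;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );
  _Thread_Priority_change(
    the_thread,
    &the_thread->Real_priority,
    new_priority,
    false,
    &queue_context
  );
  _Thread_Wait_release( the_thread, &queue_context );
  _Thread_Priority_update( &queue_context );
}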
736/**
737 * @brief Updates the priority of the thread and changes its sticky level.
738 *
739 * @param the_thread The thread.
740 * @param sticky_level_change The new value for the sticky level.
741 */
742#if defined(RTEMS_SMP)
743void _Thread_Priority_and_sticky_update(
744  Thread_Control *the_thread,
745  int             sticky_level_change
746);
747#endif
748
749/**
750 * @brief Checks if the left thread priority is less than the right thread
751 *      priority in the intuitive sense of priority.
752 *
753 * @param left The left thread priority.
754 * @param right The right thread priority.
755 *
756 * @retval true The left priority is less in the intuitive sense.
757 * @retval false The left priority is greater or equal in the intuitive sense.
758 */
759RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
760  Priority_Control left,
761  Priority_Control right
762)
763{
764  return left > right;
765}
766
767/**
768 * @brief Returns the highest priority of the left and right thread priorities
769 * in the intuitive sense of priority.
770 *
771 * @param left The left thread priority.
772 * @param right The right thread priority.
773 *
774 * @return The highest priority in the intuitive sense of priority.
775 */
776RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
777  Priority_Control left,
778  Priority_Control right
779)
780{
781  return _Thread_Priority_less_than( left, right ) ? right : left;
782}
783
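/*
 * Note (illustrative only): in the intuitive sense a numerically smaller
 * Priority_Control value is the higher (more important) priority, so for
 * example
 *
 *   _Thread_Priority_less_than( 1, 2 ) == false,
 *   _Thread_Priority_less_than( 2, 1 ) == true, and
 *   _Thread_Priority_highest( 1, 2 )   == 1.
 */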
784/**
785 * @brief Gets object information for the object id.
786 *
787 * @param id The id of the object information.
788 *
789 * @retval pointer The object information for this id.
790 * @retval NULL The object id is not valid.
791 */
792RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
793  Objects_Id id
794)
795{
796  uint32_t the_api;
797
798  the_api = _Objects_Get_API( id );
799
800  if ( !_Objects_Is_api_valid( the_api ) ) {
801    return NULL;
802  }
803
804  /*
805   * Threads are always first class :)
806   *
807   * There is no need to validate the object class of the object identifier,
808   * since this will be done by the object get methods.
809   */
810  return _Objects_Information_table[ the_api ][ 1 ];
811}
812
813/**
814 * @brief Gets a thread by its identifier.
815 *
816 * @see _Objects_Get().
817 *
818 * @param id The id of the thread.
819 * @param lock_context The lock context.
820 */
821Thread_Control *_Thread_Get(
822  Objects_Id         id,
823  ISR_lock_Context  *lock_context
824);
825
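/*
 * Usage sketch (illustrative only): look up a thread by its identifier.
 * _Thread_Get() returns with interrupts disabled via the lock context, so
 * the thread state lock is acquired with the _critical variant.  The
 * function name is hypothetical.
 */
static bool _Example_thread_is_ready( Objects_Id id )
{
  ISR_lock_Context  lock_context;
  Thread_Control   *the_thread;
  bool              is_ready;

  the_thread = _Thread_Get( id, &lock_context );

  if ( the_thread == NULL ) {
    /* The identifier is invalid or does not refer to a local thread */
    return false;
  }

  _Thread_State_acquire_critical( the_thread, &lock_context );
  is_ready = _States_Is_ready( the_thread->current_state );
  _Thread_State_release( the_thread, &lock_context );

  return is_ready;
}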
826/**
827 * @brief Gets the cpu of the thread's scheduler.
828 *
829 * @param thread The thread.
830 *
831 * @return The cpu of the thread's scheduler.
832 */
833RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
834  const Thread_Control *thread
835)
836{
837#if defined(RTEMS_SMP)
838  return thread->Scheduler.cpu;
839#else
840  (void) thread;
841
842  return _Per_CPU_Get();
843#endif
844}
845
846/**
847 * @brief Sets the cpu of the thread's scheduler.
848 *
849 * @param[out] thread The thread.
850 * @param cpu The cpu to set.
851 */
852RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
853  Thread_Control *thread,
854  Per_CPU_Control *cpu
855)
856{
857#if defined(RTEMS_SMP)
858  thread->Scheduler.cpu = cpu;
859#else
860  (void) thread;
861  (void) cpu;
862#endif
863}
864
865/**
866 * @brief Checks if the thread is the currently executing thread.
867 *
868 * This function returns true if the_thread is the currently executing
869 * thread, and false otherwise.
870 *
871 * @param the_thread The thread to verify if it is the currently executing thread.
872 *
873 * @retval true @a the_thread is the currently executing one.
874 * @retval false @a the_thread is not the currently executing one.
875 */
876RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
877  const Thread_Control *the_thread
878)
879{
880  return ( the_thread == _Thread_Executing );
881}
882
883#if defined(RTEMS_SMP)
884/**
885 * @brief Checks if the thread currently executes on some processor in the
886 * system.
887 *
888 * Do not confuse this with _Thread_Is_executing() which checks only the
889 * current processor.
890 *
891 * @param the_thread The thread for the verification.
892 *
893 * @retval true @a the_thread is currently executing on some processor.
894 * @retval false @a the_thread does not currently execute on any processor.
895 */
896RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
897  const Thread_Control *the_thread
898)
899{
900  return _CPU_Context_Get_is_executing( &the_thread->Registers );
901}
902#endif
903
904/**
905 * @brief Checks if the thread is the heir.
906 *
907 * This function returns true if the_thread is the heir
908 * thread, and false otherwise.
909 *
910 * @param the_thread The thread for the verification.
911 *
912 * @retval true @a the_thread is the heir.
913 * @retval false @a the_thread is not the heir.
914 */
915RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
916  const Thread_Control *the_thread
917)
918{
919  return ( the_thread == _Thread_Heir );
920}
921
922/**
923 * @brief Unblocks the thread.
924 *
925 * This routine clears any blocking state for the_thread.  It performs
926 * any necessary scheduling operations including the selection of
927 * a new heir thread.
928 *
929 * @param[in, out] the_thread The thread to unblock.
930 */
931RTEMS_INLINE_ROUTINE void _Thread_Unblock (
932  Thread_Control *the_thread
933)
934{
935  _Thread_Clear_state( the_thread, STATES_BLOCKED );
936}
937
938/**
939 * @brief Checks if the floating point context of the thread is currently
940 *      loaded in the floating point unit.
941 *
942 * This function returns true if the floating point context of
943 * the_thread is currently loaded in the floating point unit, and
944 * false otherwise.
945 *
946 * @param the_thread The thread for the verification.
947 *
948 * @retval true The floating point context of @a the_thread is currently
949 *      loaded in the floating point unit.
950 * @retval false The floating point context of @a the_thread is currently not
951 *      loaded in the floating point unit.
952 */
953#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
954RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
955  const Thread_Control *the_thread
956)
957{
958  return ( the_thread == _Thread_Allocated_fp );
959}
960#endif
961
962/*
963 * If the CPU has hardware floating point, then we must address saving
964 * and restoring it as part of the context switch.
965 *
966 * The second conditional compilation section selects the algorithm used
967 * to context switch between floating point tasks.  The deferred algorithm
968 * can be significantly better in a system with few floating point tasks
969 * because it reduces the total number of save and restore FP context
970 * operations.  However, this algorithm can not be used on all CPUs due
971 * to unpredictable use of FP registers by some compilers for integer
972 * operations.
973 */
974
975/**
976 * @brief Saves the executing thread's floating point area.
977 *
978 * @param executing The currently executing thread.
979 */
980RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
981{
982#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
983#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
984  if ( executing->fp_context != NULL )
985    _Context_Save_fp( &executing->fp_context );
986#endif
987#endif
988}
989
990/**
991 * @brief Restores the executing thread's floating point area.
992 *
993 * @param executing The currently executing thread.
994 */
995RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
996{
997#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
998#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
999  if ( (executing->fp_context != NULL) &&
1000       !_Thread_Is_allocated_fp( executing ) ) {
1001    if ( _Thread_Allocated_fp != NULL )
1002      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
1003    _Context_Restore_fp( &executing->fp_context );
1004    _Thread_Allocated_fp = executing;
1005  }
1006#else
1007  if ( executing->fp_context != NULL )
1008    _Context_Restore_fp( &executing->fp_context );
1009#endif
1010#endif
1011}
1012
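/*
 * Usage sketch (illustrative only): the floating point save and restore
 * helpers bracket the low-level context switch, roughly as done by the
 * thread dispatcher.  The function name is hypothetical and the fragment
 * omits all dispatch bookkeeping.
 */
static void _Example_switch_context(
  Thread_Control *executing,
  Thread_Control *heir
)
{
  _Thread_Save_fp( executing );
  _Context_Switch( &executing->Registers, &heir->Registers );
  _Thread_Restore_fp( executing );
}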
1013/**
1014 * @brief Deallocates the currently loaded floating point context.
1015 *
1016 * This routine is invoked when the currently loaded floating
1017 * point context is no longer associated with an active thread.
1018 */
1019#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
1020RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
1021{
1022  _Thread_Allocated_fp = NULL;
1023}
1024#endif
1025
1026/**
1027 * @brief Checks if a thread dispatch is necessary.
1028 *
1029 * This function returns true if a thread dispatch is necessary, and false
1030 * otherwise.
1031 *
1032 * @retval true A thread dispatch is necessary.
1033 * @retval false A thread dispatch is not necessary.
1034 */
1035RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
1036{
1037  return ( _Thread_Dispatch_necessary );
1038}
1039
1040/**
1041 * @brief Checks if the thread is NULL.
1042 *
1043 * @param the_thread The thread for the verification.
1044 *
1045 * @retval true The thread is @c NULL.
1046 * @retval false The thread is not @c NULL.
1047 */
1048RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
1049  const Thread_Control *the_thread
1050)
1051{
1052  return ( the_thread == NULL );
1053}
1054
1055/**
1056 * @brief Checks if the status code indicates that a proxy is blocking.
1057 *
1058 * This function returns true if the code indicates that a proxy is blocking, and false otherwise.
1059 *
1060 * @param code The code for the verification.
1061 *
1062 * @retval true Status indicates that a proxy is blocking.
1063 * @retval false Status indicates that a proxy is not blocking.
1064 */
1065RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
1066  uint32_t   code
1067)
1068{
1069  return (code == THREAD_STATUS_PROXY_BLOCKING);
1070}
1071
1072/**
1073 * @brief Gets the maximum number of internal threads.
1074 *
1075 * @return The maximum number of internal threads.
1076 */
1077RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
1078{
1079  /* Idle threads */
1080  uint32_t maximum_internal_threads =
1081    rtems_configuration_get_maximum_processors();
1082
1083  /* MPCI thread */
1084#if defined(RTEMS_MULTIPROCESSING)
1085  if ( _System_state_Is_multiprocessing ) {
1086    ++maximum_internal_threads;
1087  }
1088#endif
1089
1090  return maximum_internal_threads;
1091}
1092
1093/**
1094 * @brief Allocates an internal thread and returns it.
1095 *
1096 * @retval pointer Pointer to the allocated Thread_Control.
1097 * @retval NULL The operation failed.
1098 */
1099RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
1100{
1101  return (Thread_Control *)
1102    _Objects_Allocate_unprotected( &_Thread_Information.Objects );
1103}
1104
1105/**
1106 * @brief Gets the heir of the processor and makes it executing.
1107 *
1108 * Must be called with interrupts disabled.  The thread dispatch necessary
1109 * indicator is cleared as a side-effect.
1110 *
1111 * @param[in, out] cpu_self The processor to get the heir of.
1112 *
1113 * @return The heir thread.
1114 *
1115 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
1116 * _Thread_Dispatch_update_heir().
1117 */
1118RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
1119  Per_CPU_Control *cpu_self
1120)
1121{
1122  Thread_Control *heir;
1123
1124  heir = cpu_self->heir;
1125  cpu_self->dispatch_necessary = false;
1126  cpu_self->executing = heir;
1127
1128  return heir;
1129}
1130
1131/**
1132 * @brief Updates the CPU time used by the thread.
1133 *
1134 * @param[in, out] the_thread The thread to which the CPU time consumed since
1135 *      the last update is added.
1136 * @param cpu The processor which provides the CPU usage timestamp.
1137 */
1138RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
1139  Thread_Control  *the_thread,
1140  Per_CPU_Control *cpu
1141)
1142{
1143  Timestamp_Control last;
1144  Timestamp_Control ran;
1145
1146  last = cpu->cpu_usage_timestamp;
1147  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
1148  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
1149  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
1150}
1151
1152/**
1153 * @brief Updates the CPU time used by the current heir, sets the new heir, and requests a thread dispatch.
1154 *
1155 * @param[in, out] cpu_self The current processor.
1156 * @param[in, out] cpu_for_heir The processor to do a dispatch on.
1157 * @param heir The new heir for @a cpu_for_heir.
1158 */
1159#if defined( RTEMS_SMP )
1160RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
1161  Per_CPU_Control *cpu_self,
1162  Per_CPU_Control *cpu_for_heir,
1163  Thread_Control  *heir
1164)
1165{
1166  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );
1167
1168  cpu_for_heir->heir = heir;
1169
1170  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
1171}
1172#endif
1173
1174/**
1175 * @brief Gets the used cpu time of the thread and stores it in the given
1176 *      Timestamp_Control.
1177 *
1178 * @param the_thread The thread to get the used cpu time of.
1179 * @param[out] cpu_time_used Stores the used cpu time of @a the_thread.
1180 */
1181void _Thread_Get_CPU_time_used(
1182  Thread_Control    *the_thread,
1183  Timestamp_Control *cpu_time_used
1184);
1185
1186/**
1187 * @brief Initializes the control chain of the action control.
1188 *
1189 * @param[out] action_control The action control to initialize.
1190 */
1191RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
1192  Thread_Action_control *action_control
1193)
1194{
1195  _Chain_Initialize_empty( &action_control->Chain );
1196}
1197
1198/**
1199 * @brief Initializes the Thread action.
1200 *
1201 * @param[out] action The Thread_Action to initialize.
1202 */
1203RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
1204  Thread_Action *action
1205)
1206{
1207  _Chain_Set_off_chain( &action->Node );
1208}
1209
1210/**
1211 * @brief Adds a post switch action to the thread with the given handler.
1212 *
1213 * @param[in, out] the_thread The thread.
1214 * @param[in, out] action The action to add.
1215 * @param handler The handler for the action.
1216 */
1217RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
1218  Thread_Control        *the_thread,
1219  Thread_Action         *action,
1220  Thread_Action_handler  handler
1221)
1222{
1223  Per_CPU_Control *cpu_of_thread;
1224
1225  _Assert( _Thread_State_is_owner( the_thread ) );
1226
1227  cpu_of_thread = _Thread_Get_CPU( the_thread );
1228
1229  action->handler = handler;
1230
1231  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );
1232
1233  _Chain_Append_if_is_off_chain_unprotected(
1234    &the_thread->Post_switch_actions.Chain,
1235    &action->Node
1236  );
1237}
1238
1239/**
1240 * @brief Checks if the thread life state is restarting.
1241 *
1242 * @param life_state The thread life state for the verification.
1243 *
1244 * @retval true @a life_state is restarting.
1245 * @retval false @a life_state is not restarting.
1246 */
1247RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
1248  Thread_Life_state life_state
1249)
1250{
1251  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
1252}
1253
1254/**
1255 * @brief Checks if the thread life state is terminating.
1256 *
1257 * @param life_state The thread life state for the verification.
1258 *
1259 * @retval true @a life_state is terminating.
1260 * @retval false @a life_state is not terminating.
1261 */
1262RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
1263  Thread_Life_state life_state
1264)
1265{
1266  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
1267}
1268
1269/**
1270 * @brief Checks if the thread life state allows a life change.
1271 *
1272 * @param life_state The thread life state for the verification.
1273 *
1274 * @retval true @a life_state allows life change.
1275 * @retval false @a life_state does not allow life change.
1276 */
1277RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
1278  Thread_Life_state life_state
1279)
1280{
1281  return ( life_state
1282    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
1283}
1284
1285/**
1286 * @brief Checks if the thread life state is life changing.
1287 *
1288 * @param life_state The thread life state for the verification.
1289 *
1290 * @retval true @a life_state is life changing.
1291 * @retval false @a life_state is not life changing.
1292 */
1293RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
1294  Thread_Life_state life_state
1295)
1296{
1297  return ( life_state
1298    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
1299}
1300
1301/**
1302 * @brief Checks if the thread is joinable.
1303 *
1304 * @param the_thread The thread for the verification.
1305 *
1306 * @retval true @a the_thread is joinable.
1307 * @retval false @a the_thread is not joinable.
1308 */
1309RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
1310  const Thread_Control *the_thread
1311)
1312{
1313  _Assert( _Thread_State_is_owner( the_thread ) );
1314  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
1315}
1316
1317/**
1318 * @brief Increments the thread's resource count.
1319 *
1320 * @param[in, out] the_thread The thread to increase the resource count of.
1321 */
1322RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
1323  Thread_Control *the_thread
1324)
1325{
1326#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1327  ++the_thread->resource_count;
1328#else
1329  (void) the_thread;
1330#endif
1331}
1332
1333/**
1334 * @brief Decrements the thread's resource count.
1335 *
1336 * @param[in, out] the_thread The thread to decrement the resource count of.
1337 */
1338RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
1339  Thread_Control *the_thread
1340)
1341{
1342#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1343  --the_thread->resource_count;
1344#else
1345  (void) the_thread;
1346#endif
1347}
1348
1349#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1350/**
1351 * @brief Checks if the thread owns resources.
1352 *
1353 * Resources are accounted with the Thread_Control::resource_count resource
1354 * counter.  This counter is used by mutex objects for example.
1355 *
1356 * @param the_thread The thread.
1357 *
1358 * @retval true The thread owns resources.
1359 * @retval false The thread does not own resources.
1360 */
1361RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
1362  const Thread_Control *the_thread
1363)
1364{
1365  return the_thread->resource_count != 0;
1366}
1367#endif
1368
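/*
 * Usage sketch (illustrative only): a locking protocol implementation, for
 * example a mutex, accounts resource ownership with the resource count.
 * The function names are hypothetical.
 */
static void _Example_mutex_obtained( Thread_Control *owner )
{
  _Thread_Resource_count_increment( owner );
}

static void _Example_mutex_released( Thread_Control *owner )
{
  _Thread_Resource_count_decrement( owner );
}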
1369#if defined(RTEMS_SMP)
1370/**
1371 * @brief Cancels the thread's need for help.
1372 *
1373 * @param the_thread The thread to cancel the help request of.
1374 * @param cpu The processor whose lock is acquired in order to
1375 *      cancel the help request.
1376 */
1377RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
1378  Thread_Control  *the_thread,
1379  Per_CPU_Control *cpu
1380)
1381{
1382  ISR_lock_Context lock_context;
1383
1384  _Per_CPU_Acquire( cpu, &lock_context );
1385
1386  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
1387    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
1388    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
1389  }
1390
1391  _Per_CPU_Release( cpu, &lock_context );
1392}
1393#endif
1394
1395/**
1396 * @brief Gets the home scheduler of the thread.
1397 *
1398 * @param the_thread The thread to get the home scheduler of.
1399 *
1400 * @return The thread's home scheduler.
1401 */
1402RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
1403  const Thread_Control *the_thread
1404)
1405{
1406#if defined(RTEMS_SMP)
1407  return the_thread->Scheduler.home_scheduler;
1408#else
1409  (void) the_thread;
1410  return &_Scheduler_Table[ 0 ];
1411#endif
1412}
1413
1414/**
1415 * @brief Gets the home scheduler node of the thread.
1416 *
1417 * @param the_thread The thread to get the home scheduler node of.
1418 *
1419 * @return The thread's home scheduler node.
1420 */
1421RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
1422  const Thread_Control *the_thread
1423)
1424{
1425#if defined(RTEMS_SMP)
1426  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
1427  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
1428    _Chain_First( &the_thread->Scheduler.Wait_nodes )
1429  );
1430#else
1431  return the_thread->Scheduler.nodes;
1432#endif
1433}
1434
1435/**
1436 * @brief Gets the thread's scheduler node by index.
1437 *
1438 * @param the_thread The thread of which to get a scheduler node.
1439 * @param scheduler_index The index of the desired scheduler node.
1440 *
1441 * @return The scheduler node with the specified index.
1442 */
1443RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
1444  const Thread_Control *the_thread,
1445  size_t                scheduler_index
1446)
1447{
1448#if defined(RTEMS_SMP)
1449  return (Scheduler_Node *)
1450    ( (uintptr_t) the_thread->Scheduler.nodes
1451      + scheduler_index * _Scheduler_Node_size );
1452#else
1453  _Assert( scheduler_index == 0 );
1454  (void) scheduler_index;
1455  return the_thread->Scheduler.nodes;
1456#endif
1457}
1458
1459#if defined(RTEMS_SMP)
1460/**
1461 * @brief Acquires the thread scheduler lock inside a critical section.
1462 *
1463 * @param the_thread The thread whose scheduler lock is acquired.
1464 * @param lock_context The lock context.
1465 */
1466RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
1467  Thread_Control   *the_thread,
1468  ISR_lock_Context *lock_context
1469)
1470{
1471  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
1472}
1473
1474/**
1475 * @brief Releases the thread scheduler lock inside a critical section.
1476 *
1477 * @param the_thread The thread whose scheduler lock is released.
1478 * @param lock_context The lock context.
1479 */
1480RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
1481  Thread_Control   *the_thread,
1482  ISR_lock_Context *lock_context
1483)
1484{
1485  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
1486}
1487
1488/**
1489 * @brief Processes the thread's scheduler requests.
1490 *
1491 * @param[in, out] the_thread The thread for the operation.
1492 */
1493void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1494
1495/**
1496 * @brief Adds a scheduler request to the thread.
1497 *
1498 * @param[in, out] the_thread The thread to add a scheduler request to.
1499 * @param[in, out] scheduler_node The scheduler node for the request.
1500 * @param request The request to add.
1501 */
1502RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
1503  Thread_Control         *the_thread,
1504  Scheduler_Node         *scheduler_node,
1505  Scheduler_Node_request  request
1506)
1507{
1508  ISR_lock_Context       lock_context;
1509  Scheduler_Node_request current_request;
1510
1511  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );
1512
1513  current_request = scheduler_node->Thread.request;
1514
1515  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
1516    _Assert(
1517      request == SCHEDULER_NODE_REQUEST_ADD
1518        || request == SCHEDULER_NODE_REQUEST_REMOVE
1519    );
1520    _Assert( scheduler_node->Thread.next_request == NULL );
1521    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
1522    the_thread->Scheduler.requests = scheduler_node;
1523  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
1524    _Assert(
1525      ( current_request == SCHEDULER_NODE_REQUEST_ADD
1526        && request == SCHEDULER_NODE_REQUEST_REMOVE )
1527      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
1528        && request == SCHEDULER_NODE_REQUEST_ADD )
1529    );
1530    request = SCHEDULER_NODE_REQUEST_NOTHING;
1531  }
1532
1533  scheduler_node->Thread.request = request;
1534
1535  _Thread_Scheduler_release_critical( the_thread, &lock_context );
1536}
1537
1538/**
1539 * @brief Adds a wait node to the thread and adds a corresponding
1540 *      request to the thread.
1541 *
1542 * @param[in, out] the_thread The thread to add the wait node to.
1543 * @param scheduler_node The scheduler node which provides the wait node.
1544 */
1545RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
1546  Thread_Control *the_thread,
1547  Scheduler_Node *scheduler_node
1548)
1549{
1550  _Chain_Append_unprotected(
1551    &the_thread->Scheduler.Wait_nodes,
1552    &scheduler_node->Thread.Wait_node
1553  );
1554  _Thread_Scheduler_add_request(
1555    the_thread,
1556    scheduler_node,
1557    SCHEDULER_NODE_REQUEST_ADD
1558  );
1559}
1560
1561/**
1562 * @brief Removes a wait node from the thread and adds a corresponding
1563 *      removal request to it.
1564 *
1565 * @param the_thread The thread to remove the wait node from.
1566 * @param scheduler_node The scheduler node which provides the wait node.
1567 */
1568RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
1569  Thread_Control *the_thread,
1570  Scheduler_Node *scheduler_node
1571)
1572{
1573  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
1574  _Thread_Scheduler_add_request(
1575    the_thread,
1576    scheduler_node,
1577    SCHEDULER_NODE_REQUEST_REMOVE
1578  );
1579}
1580#endif
1581
1582/**
1583 * @brief Returns the priority of the thread.
1584 *
1585 * Returns the thread priority relevant for the user API and the thread wait information.
1586 * This includes temporary thread priority adjustments due to locking
1587 * protocols, a job release or the POSIX sporadic server for example.
1588 *
1589 * @param the_thread The thread of which to get the priority.
1590 *
1591 * @return The priority of the thread.
1592 */
1593RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1594  const Thread_Control *the_thread
1595)
1596{
1597  Scheduler_Node *scheduler_node;
1598
1599  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1600  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1601}
1602
1603/**
1604 * @brief Returns the unmapped priority of the thread.
1605 *
1606 * @param the_thread The thread of which to get the unmapped priority.
1607 *
1608 * @return The unmapped priority of the thread.
1609 */
1610RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_unmapped_priority(
1611  const Thread_Control *the_thread
1612)
1613{
1614  return SCHEDULER_PRIORITY_UNMAP( _Thread_Get_priority( the_thread ) );
1615}
1616
1617/**
1618 * @brief Returns the unmapped real priority of the thread.
1619 *
1620 * @param the_thread The thread of which to get the unmapped real priority.
1621 *
1622 * @return The unmapped real priority of the thread.
1623 */
1624RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_unmapped_real_priority(
1625  const Thread_Control *the_thread
1626)
1627{
1628  return SCHEDULER_PRIORITY_UNMAP( the_thread->Real_priority.priority );
1629}
1630
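/*
 * Usage sketch (illustrative only): report the current and the real priority
 * of a thread in the unmapped representation, e.g. for a status query.  The
 * structure and function names are hypothetical.
 */
typedef struct {
  Priority_Control current;
  Priority_Control real;
} Example_priority_report;

static void _Example_get_priority_report(
  const Thread_Control    *the_thread,
  Example_priority_report *report
)
{
  report->current = _Thread_Get_unmapped_priority( the_thread );
  report->real    = _Thread_Get_unmapped_real_priority( the_thread );
}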
1631/**
1632 * @brief Acquires the thread wait default lock inside a critical section
1633 * (interrupts disabled).
1634 *
1635 * @param[in, out] the_thread The thread.
1636 * @param lock_context The lock context used for the corresponding lock
1637 *   release.
1638 *
1639 * @see _Thread_Wait_release_default_critical().
1640 */
1641RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
1642  Thread_Control   *the_thread,
1643  ISR_lock_Context *lock_context
1644)
1645{
1646  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
1647}
1648
1649/**
1650 * @brief Acquires the thread wait default lock and returns the executing
1651 * thread.
1652 *
1653 * @param lock_context The lock context used for the corresponding lock
1654 *   release.
1655 *
1656 * @return The executing thread.
1657 *
1658 * @see _Thread_Wait_release_default().
1659 */
1660RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
1661  ISR_lock_Context *lock_context
1662)
1663{
1664  Thread_Control *executing;
1665
1666  _ISR_lock_ISR_disable( lock_context );
1667  executing = _Thread_Executing;
1668  _Thread_Wait_acquire_default_critical( executing, lock_context );
1669
1670  return executing;
1671}
1672
1673/**
1674 * @brief Acquires the thread wait default lock and disables interrupts.
1675 *
1676 * @param[in, out] the_thread The thread.
1677 * @param[out] lock_context The lock context used for the corresponding lock
1678 *   release.
1679 *
1680 * @see _Thread_Wait_release_default().
1681 */
1682RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
1683  Thread_Control   *the_thread,
1684  ISR_lock_Context *lock_context
1685)
1686{
1687  _ISR_lock_ISR_disable( lock_context );
1688  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
1689}
1690
1691/**
1692 * @brief Releases the thread wait default lock inside a critical section
1693 * (interrupts disabled).
1694 *
1695 * The previous interrupt status is not restored.
1696 *
1697 * @param[in, out] the_thread The thread.
1698 * @param lock_context The lock context used for the corresponding lock
1699 *   acquire.
1700 */
1701RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
1702  Thread_Control   *the_thread,
1703  ISR_lock_Context *lock_context
1704)
1705{
1706  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1707}
1708
1709/**
1710 * @brief Releases the thread wait default lock and restores the previous
1711 * interrupt status.
1712 *
1713 * @param[in, out] the_thread The thread.
1714 * @param[out] lock_context The lock context used for the corresponding lock
1715 *   acquire.
1716 */
1717RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
1718  Thread_Control   *the_thread,
1719  ISR_lock_Context *lock_context
1720)
1721{
1722  _Thread_Wait_release_default_critical( the_thread, lock_context );
1723  _ISR_lock_ISR_enable( lock_context );
1724}
1725
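/*
 * Usage sketch (illustrative only): read a field of the thread wait
 * information under the protection of the default thread wait lock.  The
 * function name is hypothetical.
 */
static void *_Example_get_wait_return_argument( Thread_Control *the_thread )
{
  ISR_lock_Context  lock_context;
  void             *return_argument;

  _Thread_Wait_acquire_default( the_thread, &lock_context );
  return_argument = the_thread->Wait.return_argument;
  _Thread_Wait_release_default( the_thread, &lock_context );

  return return_argument;
}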
1726#if defined(RTEMS_SMP)
1727#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1728  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1729
1730/**
1731 * @brief Removes the first pending wait lock request.
1732 *
1733 * @param the_thread The thread to remove the request from.
1734 * @param queue_lock_context The queue lock context.
1735 */
1736RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1737  Thread_Control            *the_thread,
1738  Thread_queue_Lock_context *queue_lock_context
1739)
1740{
1741  Chain_Node *first;
1742
1743  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1744  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1745
1746  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1747    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1748  }
1749}
1750
1751/**
1752 * @brief Acquires the wait queue inside a critical section.
1753 *
1754 * @param queue The queue to acquire.
1755 * @param queue_lock_context The queue lock context.
1756 */
1757RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
1758  Thread_queue_Queue        *queue,
1759  Thread_queue_Lock_context *queue_lock_context
1760)
1761{
1762  _Thread_queue_Queue_acquire_critical(
1763    queue,
1764    &_Thread_Executing->Potpourri_stats,
1765    &queue_lock_context->Lock_context
1766  );
1767}
1768
1769/**
1770 * @brief Releases the wait queue inside a critical section.
1771 *
1772 * @param queue The queue to release.
1773 * @param queue_lock_context The queue lock context.
1774 */
1775RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
1776  Thread_queue_Queue        *queue,
1777  Thread_queue_Lock_context *queue_lock_context
1778)
1779{
1780  _Thread_queue_Queue_release_critical(
1781    queue,
1782    &queue_lock_context->Lock_context
1783  );
1784}
1785#endif
1786
1787/**
1788 * @brief Acquires the thread wait lock inside a critical section (interrupts
1789 * disabled).
1790 *
1791 * @param[in, out] the_thread The thread.
1792 * @param[in, out] queue_context The thread queue context for the corresponding
1793 *   _Thread_Wait_release_critical().
1794 */
1795RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
1796  Thread_Control       *the_thread,
1797  Thread_queue_Context *queue_context
1798)
1799{
1800#if defined(RTEMS_SMP)
1801  Thread_queue_Queue *queue;
1802
1803  _Thread_Wait_acquire_default_critical(
1804    the_thread,
1805    &queue_context->Lock_context.Lock_context
1806  );
1807
1808  queue = the_thread->Wait.queue;
1809  queue_context->Lock_context.Wait.queue = queue;
1810
1811  if ( queue != NULL ) {
1812    _Thread_queue_Gate_add(
1813      &the_thread->Wait.Lock.Pending_requests,
1814      &queue_context->Lock_context.Wait.Gate
1815    );
1816    _Thread_Wait_release_default_critical(
1817      the_thread,
1818      &queue_context->Lock_context.Lock_context
1819    );
1820    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1821
1822    if ( queue_context->Lock_context.Wait.queue == NULL ) {
1823      _Thread_Wait_release_queue_critical(
1824        queue,
1825        &queue_context->Lock_context
1826      );
1827      _Thread_Wait_acquire_default_critical(
1828        the_thread,
1829        &queue_context->Lock_context.Lock_context
1830      );
1831      _Thread_Wait_remove_request_locked(
1832        the_thread,
1833        &queue_context->Lock_context
1834      );
1835      _Assert( the_thread->Wait.queue == NULL );
1836    }
1837  }
1838#else
1839  (void) the_thread;
1840  (void) queue_context;
1841#endif
1842}
1843
1844/**
1845 * @brief Acquires the thread wait default lock and disables interrupts.
1846 *
1847 * @param[in, out] the_thread The thread.
1848 * @param[in, out] queue_context The thread queue context for the corresponding
1849 *   _Thread_Wait_release().
1850 */
1851RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
1852  Thread_Control       *the_thread,
1853  Thread_queue_Context *queue_context
1854)
1855{
1856  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
1857  _Thread_Wait_acquire_critical( the_thread, queue_context );
1858}
1859
1860/**
1861 * @brief Releases the thread wait lock inside a critical section (interrupts
1862 * disabled).
1863 *
1864 * The previous interrupt status is not restored.
1865 *
1866 * @param[in, out] the_thread The thread.
1867 * @param[in, out] queue_context The thread queue context used for the
1868 *   corresponding _Thread_Wait_acquire_critical().
1869 */
1870RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
1871  Thread_Control       *the_thread,
1872  Thread_queue_Context *queue_context
1873)
1874{
1875#if defined(RTEMS_SMP)
1876  Thread_queue_Queue *queue;
1877
1878  queue = queue_context->Lock_context.Wait.queue;
1879
1880  if ( queue != NULL ) {
1881    _Thread_Wait_release_queue_critical(
1882      queue, &queue_context->Lock_context
1883    );
1884    _Thread_Wait_acquire_default_critical(
1885      the_thread,
1886      &queue_context->Lock_context.Lock_context
1887    );
1888    _Thread_Wait_remove_request_locked(
1889      the_thread,
1890      &queue_context->Lock_context
1891    );
1892  }
1893
1894  _Thread_Wait_release_default_critical(
1895    the_thread,
1896    &queue_context->Lock_context.Lock_context
1897  );
1898#else
1899  (void) the_thread;
1900  (void) queue_context;
1901#endif
1902}
1903
1904/**
1905 * @brief Releases the thread wait lock and restores the previous interrupt
1906 * status.
1907 *
1908 * @param[in, out] the_thread The thread.
1909 * @param[in, out] queue_context The thread queue context used for the
1910 *   corresponding _Thread_Wait_acquire().
1911 */
1912RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
1913  Thread_Control       *the_thread,
1914  Thread_queue_Context *queue_context
1915)
1916{
1917  _Thread_Wait_release_critical( the_thread, queue_context );
1918  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
1919}
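
/*
 * Usage sketch (illustrative only): the acquire/release pair below protects
 * the complete thread wait state, including a possibly involved thread queue
 * on SMP configurations.  It is assumed that the queue context was prepared
 * with _Thread_queue_Context_initialize() from <rtems/score/threadqimpl.h>.
 *
 *   Thread_queue_Context queue_context;
 *
 *   _Thread_queue_Context_initialize( &queue_context );
 *   _Thread_Wait_acquire( the_thread, &queue_context );
 *   ... inspect or modify the thread wait state ...
 *   _Thread_Wait_release( the_thread, &queue_context );
 */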
1920
1921/**
1922 * @brief Claims the thread wait queue.
1923 *
1924 * The caller must not be the owner of the default thread wait lock.  The
1925 * caller must be the owner of the corresponding thread queue lock.  The
1926 * registration of the corresponding thread queue operations is deferred and
1927 * done after the deadlock detection.  This is crucial to support timeouts on
1928 * SMP configurations.
1929 *
1930 * @param[in, out] the_thread The thread.
1931 * @param[in, out] queue The new thread queue.
1932 *
1933 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
1934 */
1935RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
1936  Thread_Control     *the_thread,
1937  Thread_queue_Queue *queue
1938)
1939{
1940  ISR_lock_Context lock_context;
1941
1942  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1943
1944  _Assert( the_thread->Wait.queue == NULL );
1945
1946#if defined(RTEMS_SMP)
1947  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
1948  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
1949  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
1950#endif
1951
1952  the_thread->Wait.queue = queue;
1953
1954  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1955}
1956
1957/**
1958 * @brief Finalizes the thread wait queue claim via registration of the
1959 * corresponding thread queue operations.
1960 *
1961 * @param[in, out] the_thread The thread.
1962 * @param operations The corresponding thread queue operations.
1963 */
1964RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
1965  Thread_Control                *the_thread,
1966  const Thread_queue_Operations *operations
1967)
1968{
1969  the_thread->Wait.operations = operations;
1970}
1971
1972/**
1973 * @brief Removes a thread wait lock request.
1974 *
1975 * On SMP configurations, removes a thread wait lock request.
1976 *
1977 * On other configurations, this function does nothing.
1978 *
1979 * @param[in, out] the_thread The thread.
1980 * @param[in, out] queue_lock_context The thread queue lock context used for
1981 *   the corresponding _Thread_Wait_acquire().
1982 */
1983RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
1984  Thread_Control            *the_thread,
1985  Thread_queue_Lock_context *queue_lock_context
1986)
1987{
1988#if defined(RTEMS_SMP)
1989  ISR_lock_Context lock_context;
1990
1991  _Thread_Wait_acquire_default( the_thread, &lock_context );
1992  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
1993  _Thread_Wait_release_default( the_thread, &lock_context );
1994#else
1995  (void) the_thread;
1996  (void) queue_lock_context;
1997#endif
1998}
1999
2000/**
2001 * @brief Restores the default thread wait queue and operations.
2002 *
2003 * The caller must be the owner of the current thread wait queue lock.
2004 *
2005 * On SMP configurations, the pending requests are updated to use the stale
2006 * thread queue operations.
2007 *
2008 * @param[in, out] the_thread The thread.
2009 *
2010 * @see _Thread_Wait_claim().
2011 */
2012RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
2013  Thread_Control *the_thread
2014)
2015{
2016#if defined(RTEMS_SMP)
2017  ISR_lock_Context  lock_context;
2018  Chain_Node       *node;
2019  const Chain_Node *tail;
2020
2021  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
2022
2023  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
2024  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
2025
2026  if ( node != tail ) {
2027    do {
2028      Thread_queue_Context *queue_context;
2029
2030      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
2031      queue_context->Lock_context.Wait.queue = NULL;
2032
2033      node = _Chain_Next( node );
2034    } while ( node != tail );
2035
2036    _Thread_queue_Gate_add(
2037      &the_thread->Wait.Lock.Pending_requests,
2038      &the_thread->Wait.Lock.Tranquilizer
2039    );
2040  } else {
2041    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
2042  }
2043#endif
2044
2045  the_thread->Wait.queue = NULL;
2046  the_thread->Wait.operations = &_Thread_queue_Operations_default;
2047
2048#if defined(RTEMS_SMP)
2049  _Thread_Wait_release_default_critical( the_thread, &lock_context );
2050#endif
2051}
2052
2053/**
2054 * @brief Tranquilizes the thread after a wait on a thread queue.
2055 *
2056 * After the violent blocking procedure this function makes the thread calm and
2057 * peaceful again so that it can carry out its normal work.
2058 *
2059 * On SMP configurations, ensures that all pending thread wait lock requests
2060 * have completed before the thread is able to begin a new thread wait procedure.
2061 *
2062 * On other configurations, this function does nothing.
2063 *
2064 * It must be called exactly once after a _Thread_Wait_claim(),
2065 *  - after the corresponding thread queue lock was released, and
2066 *  - after the default wait state was restored or some other processor is
2067 *    about to do this.
2068 *
2069 * @param the_thread The thread.
2070 */
2071RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
2072  Thread_Control *the_thread
2073)
2074{
2075#if defined(RTEMS_SMP)
2076  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
2077#else
2078  (void) the_thread;
2079#endif
2080}
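
/*
 * Protocol sketch (illustrative only): a blocking thread queue operation
 * typically uses the claim related functions above roughly in this order.
 * The steps in angle brackets are placeholders for code outside the scope of
 * this sketch; see the thread queue implementation for the authoritative
 * sequence.
 *
 *   _Thread_Wait_claim( the_thread, queue );
 *   <deadlock detection>
 *   _Thread_Wait_claim_finalize( the_thread, operations );
 *   <enqueue the thread and block it>
 *   ...
 *   <on wake up> _Thread_Wait_restore_default( the_thread );
 *   ...
 *   _Thread_Wait_tranquilize( the_thread );
 *   <the thread may now start a new thread wait procedure>
 */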
2081
2082/**
2083 * @brief Cancels a thread wait on a thread queue.
2084 *
2085 * @param[in, out] the_thread The thread.
2086 * @param[in, out] queue_context The thread queue context used for the
2087 *   corresponding _Thread_Wait_acquire().
2088 */
2089RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
2090  Thread_Control       *the_thread,
2091  Thread_queue_Context *queue_context
2092)
2093{
2094  Thread_queue_Queue *queue;
2095
2096  queue = the_thread->Wait.queue;
2097
2098#if defined(RTEMS_SMP)
2099  if ( queue != NULL ) {
2100    _Assert( queue_context->Lock_context.Wait.queue == queue );
2101#endif
2102
2103    ( *the_thread->Wait.operations->extract )(
2104      queue,
2105      the_thread,
2106      queue_context
2107    );
2108    _Thread_Wait_restore_default( the_thread );
2109
2110#if defined(RTEMS_SMP)
2111    _Assert( queue_context->Lock_context.Wait.queue == NULL );
2112    queue_context->Lock_context.Wait.queue = queue;
2113  }
2114#endif
2115}
2116
2117/**
2118 * @brief The initial thread wait flags value set by _Thread_Initialize().
2119 */
2120#define THREAD_WAIT_FLAGS_INITIAL 0x0U
2121
2122/**
2123 * @brief Mask to get the thread wait state flags.
2124 */
2125#define THREAD_WAIT_STATE_MASK 0xffU
2126
2127/**
2128 * @brief Indicates that the thread has started the blocking operation.
2129 *
2130 * A blocking operation consists of an optional watchdog initialization and the
2131 * setting of the appropriate thread blocking state with the corresponding
2132 * scheduler block operation.
2133 */
2134#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
2135
2136/**
2137 * @brief Indicates that the thread completed the blocking operation.
2138 */
2139#define THREAD_WAIT_STATE_BLOCKED 0x2U
2140
2141/**
2142 * @brief Indicates that a condition to end the thread wait occurred.
2143 *
2144 * This could be a timeout, a signal, an event, or the availability of a resource.
2145 */
2146#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
2147
2148/**
2149 * @brief Mask to get the thread wait class flags.
2150 */
2151#define THREAD_WAIT_CLASS_MASK 0xff00U
2152
2153/**
2154 * @brief Indicates that the thread waits for an event.
2155 */
2156#define THREAD_WAIT_CLASS_EVENT 0x100U
2157
2158/**
2159 * @brief Indicates that the thread waits for a system event.
2160 */
2161#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
2162
2163/**
2164 * @brief Indicates that the thread waits for an object.
2165 */
2166#define THREAD_WAIT_CLASS_OBJECT 0x400U
2167
2168/**
2169 * @brief Indicates that the thread waits for a period.
2170 */
2171#define THREAD_WAIT_CLASS_PERIOD 0x800U
2172
2173/**
2174 * @brief Sets the thread's wait flags.
2175 *
2176 * @param[in, out] the_thread The thread to set the wait flags of.
2177 * @param flags The flags to set.
2178 */
2179RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
2180  Thread_Control    *the_thread,
2181  Thread_Wait_flags  flags
2182)
2183{
2184#if defined(RTEMS_SMP)
2185  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
2186#else
2187  the_thread->Wait.flags = flags;
2188#endif
2189}
2190
2191/**
2192 * @brief Gets the thread's wait flags with ATOMIC_ORDER_RELAXED memory ordering.
2193 *
2194 * @param the_thread The thread to get the wait flags of.
2195 *
2196 * @return The thread's wait flags.
2197 */
2198RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
2199  const Thread_Control *the_thread
2200)
2201{
2202#if defined(RTEMS_SMP)
2203  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
2204#else
2205  return the_thread->Wait.flags;
2206#endif
2207}
2208
2209/**
2210 * @brief Gets the thread's wait flags with ATOMIC_ORDER_ACQUIRE memory ordering.
2211 *
2212 * @param the_thread The thread to get the wait flags of.
2213 *
2214 * @return The thread's wait flags.
2215 */
2216RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
2217  const Thread_Control *the_thread
2218)
2219{
2220#if defined(RTEMS_SMP)
2221  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
2222#else
2223  return the_thread->Wait.flags;
2224#endif
2225}
2226
2227/**
2228 * @brief Tries to change the thread wait flags with release semantics in case
2229 * of success.
2230 *
2231 * Must be called inside a critical section (interrupts disabled).
2232 *
2233 * In case the wait flags are equal to the expected wait flags, then the wait
2234 * flags are set to the desired wait flags.
2235 *
2236 * @param the_thread The thread.
2237 * @param expected_flags The expected wait flags.
2238 * @param desired_flags The desired wait flags.
2239 *
2240 * @retval true The wait flags were equal to the expected wait flags.
2241 * @retval false The wait flags were not equal to the expected wait flags.
2242 */
2243RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
2244  Thread_Control    *the_thread,
2245  Thread_Wait_flags  expected_flags,
2246  Thread_Wait_flags  desired_flags
2247)
2248{
2249  _Assert( _ISR_Get_level() != 0 );
2250
2251#if defined(RTEMS_SMP)
2252  return _Atomic_Compare_exchange_uint(
2253    &the_thread->Wait.flags,
2254    &expected_flags,
2255    desired_flags,
2256    ATOMIC_ORDER_RELEASE,
2257    ATOMIC_ORDER_RELAXED
2258  );
2259#else
2260  bool success = ( the_thread->Wait.flags == expected_flags );
2261
2262  if ( success ) {
2263    the_thread->Wait.flags = desired_flags;
2264  }
2265
2266  return success;
2267#endif
2268}
2269
2270/**
2271 * @brief Tries to change the thread wait flags with acquire semantics.
2272 *
2273 * In case the wait flags are equal to the expected wait flags, then the wait
2274 * flags are set to the desired wait flags.
2275 *
2276 * @param the_thread The thread.
2277 * @param expected_flags The expected wait flags.
2278 * @param desired_flags The desired wait flags.
2279 *
2280 * @retval true The wait flags were equal to the expected wait flags.
2281 * @retval false The wait flags were not equal to the expected wait flags.
2282 */
2283RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
2284  Thread_Control    *the_thread,
2285  Thread_Wait_flags  expected_flags,
2286  Thread_Wait_flags  desired_flags
2287)
2288{
2289#if defined(RTEMS_SMP)
2290  return _Atomic_Compare_exchange_uint(
2291    &the_thread->Wait.flags,
2292    &expected_flags,
2293    desired_flags,
2294    ATOMIC_ORDER_ACQUIRE,
2295    ATOMIC_ORDER_ACQUIRE
2296  );
2297#else
2298  bool      success;
2299  ISR_Level level;
2300
2301  _ISR_Local_disable( level );
2302
2303  success = _Thread_Wait_flags_try_change_release(
2304    the_thread,
2305    expected_flags,
2306    desired_flags
2307  );
2308
2309  _ISR_Local_enable( level );
2310  return success;
2311#endif
2312}
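
/*
 * State machine sketch (illustrative only): a blocking operation typically
 * drives the wait flags as shown below for the THREAD_WAIT_CLASS_OBJECT
 * class; the wake-up side uses _Thread_Wait_flags_try_change_release() with
 * the INTEND_TO_BLOCK and READY_AGAIN states in the reverse direction.
 *
 *   _Thread_Wait_flags_set(
 *     executing,
 *     THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
 *   );
 *   ... issue the blocking request ...
 *   if (
 *     _Thread_Wait_flags_try_change_acquire(
 *       executing,
 *       THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
 *       THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED
 *     )
 *   ) {
 *     ... really block, e.g. via the scheduler block operation ...
 *   } else {
 *     ... the wait already ended (READY_AGAIN), do not block ...
 *   }
 */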
2313
2314/**
2315 * @brief Returns the object identifier of the object containing the current
2316 * thread wait queue.
2317 *
2318 * This function may be used for debug and system information purposes.  The
2319 * caller must be the owner of the thread lock.
2320 *
2321 * @param the_thread The thread.
2322 *
2323 * @retval 0 The thread waits on no thread queue currently, the thread wait
2324 *   queue is not contained in an object, or the current thread state provides
2325 *   insufficient information, e.g. the thread is in the middle of a blocking
2326 *   operation.
2327 * @retval other The object identifier of the object containing the thread wait
2328 *   queue.
2329 */
2330Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
2331
2332/**
2333 * @brief Gets the thread wait return code as a status.
2334 *
2335 * @param the_thread The thread to get the wait return code of.
2336 */
2337RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
2338  const Thread_Control *the_thread
2339)
2340{
2341  return (Status_Control) the_thread->Wait.return_code;
2342}
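
/*
 * Usage sketch (illustrative only): after the thread wait finished, callers
 * commonly translate the wait return code into an API specific status, for
 * example with the STATUS_GET_CLASSIC() or STATUS_GET_POSIX() macros from
 * <rtems/score/status.h>.
 *
 *   Status_Control status;
 *
 *   status = _Thread_Wait_get_status( executing );
 *   return STATUS_GET_CLASSIC( status );
 */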
2343
2344/**
2345 * @brief Cancels a blocking operation so that the thread can continue its
2346 * execution.
2347 *
2348 * In case this function actually cancelled the blocking operation, then the
2349 * thread wait return code is set to the specified status.
2350 *
2351 * A specialization of this function is _Thread_Timeout().
2352 *
2353 * @param[in, out] the_thread The thread.
2354 * @param status The thread wait status.
2355 */
2356void _Thread_Continue( Thread_Control *the_thread, Status_Control status );
2357
2358/**
2359 * @brief General purpose thread wait timeout.
2360 *
2361 * @param the_watchdog The thread timer watchdog.
2362 */
2363void _Thread_Timeout( Watchdog_Control *the_watchdog );
2364
2365/**
2366 * @brief Initializes the thread timer.
2367 *
2368 * @param[in, out] timer The timer to initialize.
2369 * @param cpu The processor which provides the ticks watchdog header.
2370 */
2371RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
2372  Thread_Timer_information *timer,
2373  Per_CPU_Control          *cpu
2374)
2375{
2376  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
2377  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
2378  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
2379}
2380
2381/**
2382 * @brief Adds a timeout in clock ticks to the thread.
2383 *
2384 * @param[in, out] the_thread The thread to arm the timeout for.
2385 * @param cpu The processor which provides the ticks watchdog header.
2386 * @param ticks The timeout interval in clock ticks.
2387 */
2388RTEMS_INLINE_ROUTINE void _Thread_Add_timeout_ticks(
2389  Thread_Control    *the_thread,
2390  Per_CPU_Control   *cpu,
2391  Watchdog_Interval  ticks
2392)
2393{
2394  ISR_lock_Context lock_context;
2395
2396  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2397
2398  the_thread->Timer.header =
2399    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
2400  the_thread->Timer.Watchdog.routine = _Thread_Timeout;
2401  _Watchdog_Per_CPU_insert_ticks( &the_thread->Timer.Watchdog, cpu, ticks );
2402
2403  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2404}
2405
2406/**
2407 * @brief Inserts the thread timer into the processor's realtime watchdog header.
2408 *
2409 * @param[in, out] the_thread The thread for the operation.
2410 * @param cpu The processor to get the realtime watchdog header from.
2411 * @param routine The watchdog routine for the thread.
2412 * @param expire The expiration time for the watchdog.
2413 */
2414RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_realtime(
2415  Thread_Control                 *the_thread,
2416  Per_CPU_Control                *cpu,
2417  Watchdog_Service_routine_entry  routine,
2418  uint64_t                        expire
2419)
2420{
2421  ISR_lock_Context  lock_context;
2422  Watchdog_Header  *header;
2423
2424  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2425
2426  header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
2427  the_thread->Timer.header = header;
2428  the_thread->Timer.Watchdog.routine = routine;
2429  _Watchdog_Per_CPU_insert( &the_thread->Timer.Watchdog, cpu, header, expire );
2430
2431  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2432}
2433
2434/**
2435 * @brief Removes the watchdog timer from the thread.
2436 *
2437 * @param[in, out] the_thread The thread to remove the watchdog from.
2438 */
2439RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
2440{
2441  ISR_lock_Context lock_context;
2442
2443  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
2444
2445  _Watchdog_Per_CPU_remove(
2446    &the_thread->Timer.Watchdog,
2447#if defined(RTEMS_SMP)
2448    the_thread->Timer.Watchdog.cpu,
2449#else
2450    _Per_CPU_Get(),
2451#endif
2452    the_thread->Timer.header
2453  );
2454
2455  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
2456}
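
/*
 * Usage sketch (illustrative only): arm a timeout in clock ticks for a thread
 * which is about to block and disarm it once the wait ended.  The tick count
 * of 100 and the surrounding block/unblock code are arbitrary for this
 * sketch.
 *
 *   Per_CPU_Control *cpu_self;
 *
 *   cpu_self = _Per_CPU_Get();
 *   _Thread_Add_timeout_ticks( the_thread, cpu_self, 100 );
 *   ... block the thread; _Thread_Timeout() fires if the interval expires ...
 *   _Thread_Timer_remove( the_thread );
 */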
2457
2458/**
2459 * @brief Removes the watchdog timer from the thread and unblocks it if necessary.
2460 *
2461 * @param[in, out] the_thread The thread to remove the watchdog from and unblock
2462 *      if necessary.
2463 * @param queue The thread queue.
2464 */
2465RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
2466  Thread_Control     *the_thread,
2467  Thread_queue_Queue *queue
2468)
2469{
2470  _Thread_Wait_tranquilize( the_thread );
2471  _Thread_Timer_remove( the_thread );
2472
2473#if defined(RTEMS_MULTIPROCESSING)
2474  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
2475    _Thread_Unblock( the_thread );
2476  } else {
2477    _Thread_queue_Unblock_proxy( queue, the_thread );
2478  }
2479#else
2480  (void) queue;
2481  _Thread_Unblock( the_thread );
2482#endif
2483}
2484
2485/**
2486 * @brief Sets the name of the thread.
2487 *
2488 * @param[out] the_thread The thread to change the name of.
2489 * @param name The new name for the thread.
2490 *
2491 * @retval STATUS_SUCCESSFUL The operation succeeded.
2492 * @retval STATUS_RESULT_TOO_LARGE The name was too long.
2493 */
2494Status_Control _Thread_Set_name(
2495  Thread_Control *the_thread,
2496  const char     *name
2497);
2498
2499/**
2500 * @brief Gets the name of the thread.
2501 *
2502 * @param the_thread The thread to get the name of.
2503 * @param[out] buffer The buffer to copy the thread's name into.
2504 * @param buffer_size The size of @a buffer.
2505 *
2506 * @return The number of bytes copied to @a buffer.
2507 */
2508size_t _Thread_Get_name(
2509  const Thread_Control *the_thread,
2510  char                 *buffer,
2511  size_t                buffer_size
2512);
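
/*
 * Usage sketch (illustrative only): the buffer size of 32 characters and the
 * "NTWK" name are arbitrary choices for this sketch.
 *
 *   char buffer[ 32 ];
 *
 *   if ( _Thread_Set_name( the_thread, "NTWK" ) == STATUS_SUCCESSFUL ) {
 *     _Thread_Get_name( the_thread, buffer, sizeof( buffer ) );
 *   }
 */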
2513
2514#if defined(RTEMS_SMP)
2515#define THREAD_PIN_STEP 2
2516
2517#define THREAD_PIN_PREEMPTION 1
2518
2519/**
2520 * @brief Unpins the thread.
2521 *
2522 * @param executing The currently executing thread.
2523 * @param cpu_self The current processor.
2524 */
2525void _Thread_Do_unpin(
2526  Thread_Control  *executing,
2527  Per_CPU_Control *cpu_self
2528);
2529#endif
2530
2531/**
2532 * @brief Pins the executing thread.
2533 *
2534 * @param executing The currently executing thread.
2535 */
2536RTEMS_INLINE_ROUTINE void _Thread_Pin( Thread_Control *executing )
2537{
2538#if defined(RTEMS_SMP)
2539  _Assert( executing == _Thread_Executing );
2540
2541  executing->Scheduler.pin_level += THREAD_PIN_STEP;
2542#else
2543  (void) executing;
2544#endif
2545}
2546
2547/**
2548 * @brief Unpins the thread.
2549 *
2550 * @param executing The currently executing thread.
2551 * @param cpu_self The current processor.
2552 */
2553RTEMS_INLINE_ROUTINE void _Thread_Unpin(
2554  Thread_Control  *executing,
2555  Per_CPU_Control *cpu_self
2556)
2557{
2558#if defined(RTEMS_SMP)
2559  unsigned int pin_level;
2560
2561  _Assert( executing == _Thread_Executing );
2562
2563  pin_level = executing->Scheduler.pin_level;
2564  _Assert( pin_level > 0 );
2565
2566  if (
2567    RTEMS_PREDICT_TRUE(
2568      pin_level != ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION )
2569    )
2570  ) {
2571    executing->Scheduler.pin_level = pin_level - THREAD_PIN_STEP;
2572  } else {
2573    _Thread_Do_unpin( executing, cpu_self );
2574  }
2575#else
2576  (void) executing;
2577  (void) cpu_self;
2578#endif
2579}
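
/*
 * Usage sketch (illustrative only): pin and unpin calls must be balanced and
 * operate on the executing thread.  It is assumed that _Thread_Get_executing()
 * from <rtems/score/percpu.h> and the thread dispatch disable/enable pair from
 * <rtems/score/threaddispatch.h> are available; whether additional protection
 * around _Thread_Pin() is required depends on the caller's context.
 *
 *   Thread_Control  *executing;
 *   Per_CPU_Control *cpu_self;
 *
 *   executing = _Thread_Get_executing();
 *   _Thread_Pin( executing );
 *   ... code which must stay on the current processor ...
 *   cpu_self = _Thread_Dispatch_disable();
 *   _Thread_Unpin( executing, cpu_self );
 *   _Thread_Dispatch_enable( cpu_self );
 */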
2580
2581/** @}*/
2582
2583#ifdef __cplusplus
2584}
2585#endif
2586
2587#if defined(RTEMS_MULTIPROCESSING)
2588#include <rtems/score/threadmp.h>
2589#endif
2590
2591#endif
2592/* end of include file */