source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ e41308ea

Last change on this file since e41308ea was e41308ea, checked in by Sebastian Huber <sebastian.huber@…>, on 08/22/16 at 08:58:34

score: Introduce Thread_queue_Lock_context

Introduce Thread_queue_Lock_context to contain the context necessary for
thread queue lock and thread wait lock acquire/release operations to
reduce the Thread_Control size.

1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2016 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/resourceimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/threadqimpl.h>
35#include <rtems/score/todimpl.h>
36#include <rtems/score/freechain.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44/**
45 * @addtogroup ScoreThread
46 */
47/**@{**/
48
49/**
50 *  The following constant defines the status code which indicates that
51 *  a proxy is blocking while a thread waits for a resource.
52 */
53#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
54
55/**
56 *  Self for the GNU Ada Run-Time
57 */
58extern void *rtems_ada_self;
59
60typedef struct {
61  Objects_Information Objects;
62
63  Freechain_Control Free_thread_queue_heads;
64} Thread_Information;
65
66/**
67 *  The following defines the information control block used to
68 *  manage this class of objects.
69 */
70extern Thread_Information _Thread_Internal_information;
71
72/**
73 *  The following points to the thread whose floating point
74 *  context is currently loaded.
75 */
76#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
77extern Thread_Control *_Thread_Allocated_fp;
78#endif
79
80#define THREAD_CHAIN_NODE_TO_THREAD( node ) \
81  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.Chain )
82
83#define THREAD_RBTREE_NODE_TO_THREAD( node ) \
84  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.RBTree )
85
86#if defined(RTEMS_SMP)
87#define THREAD_RESOURCE_NODE_TO_THREAD( node ) \
88  RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node )
89#endif
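
/*
 * Illustrative sketch (not part of the original header; the chain variable is
 * hypothetical): the container-of macros above recover the enclosing
 * Thread_Control from a wait node taken off a thread queue chain.
 *
 * @code
 * Chain_Node     *node = _Chain_First( &some_wait_chain );
 * Thread_Control *the_thread = THREAD_CHAIN_NODE_TO_THREAD( node );
 * @endcode
 */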
90
91void _Thread_Initialize_information(
92  Thread_Information  *information,
93  Objects_APIs         the_api,
94  uint16_t             the_class,
95  uint32_t             maximum,
96  bool                 is_string,
97  uint32_t             maximum_name_length
98);
99
100/**
101 *  @brief Initialize thread handler.
102 *
103 *  This routine performs the initialization necessary for this handler.
104 */
105void _Thread_Handler_initialization(void);
106
107/**
108 *  @brief Create idle thread.
109 *
110 *  This routine creates the idle thread.
111 *
112 *  @warning No thread should be created before this one.
113 */
114void _Thread_Create_idle(void);
115
116/**
117 *  @brief Start thread multitasking.
118 *
119 *  This routine initiates multitasking.  It is invoked only as
120 *  part of initialization and its invocation is the last act of
121 *  the non-multitasking part of the system initialization.
122 */
123void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
124
125/**
126 *  @brief Allocate the requested stack space for the thread.
127 *
128 *  Allocate the requested stack space for the thread.
129 *  Set the Start.stack field to the address of the stack.
130 *
131 *  @param[in] the_thread is the thread for which stack space is requested
132 *  @param[in] stack_size is the size of the requested stack space
133 *
134 *  @retval actual size allocated after any adjustment
135 *  @retval zero if the allocation failed
136 */
137size_t _Thread_Stack_Allocate(
138  Thread_Control *the_thread,
139  size_t          stack_size
140);
141
142/**
143 *  @brief Deallocate thread stack.
144 *
145 *  Deallocate the Thread's stack.
146 */
147void _Thread_Stack_Free(
148  Thread_Control *the_thread
149);
150
151/**
152 *  @brief Initialize thread.
153 *
154 *  This routine initializes the specified thread.  It allocates
155 *  all memory associated with this thread.  It completes by adding
156 *  the thread to the local object table so operations on this
157 *  thread id are allowed.
158 *
159 *  @note If stack_area is NULL, it is allocated from the workspace.
160 *
161 *  @note If the stack is allocated from the workspace, then it is
162 *        guaranteed to be of at least minimum size.
163 */
164bool _Thread_Initialize(
165  Thread_Information                   *information,
166  Thread_Control                       *the_thread,
167  const struct Scheduler_Control       *scheduler,
168  void                                 *stack_area,
169  size_t                                stack_size,
170  bool                                  is_fp,
171  Priority_Control                      priority,
172  bool                                  is_preemptible,
173  Thread_CPU_budget_algorithms          budget_algorithm,
174  Thread_CPU_budget_algorithm_callout   budget_callout,
175  uint32_t                              isr_level,
176  Objects_Name                          name
177);
178
179/**
180 *  @brief Starts a thread and makes it ready to execute.
181 *
182 *  This routine initializes the executable information for a thread
183 *  and makes it ready to execute.  After this routine executes, the
184 *  thread competes with all other threads for CPU time.
185 *
186 *  @param the_thread The thread to be started.
187 *  @param entry The thread entry information.
188 */
189bool _Thread_Start(
190  Thread_Control                 *the_thread,
191  const Thread_Entry_information *entry,
192  ISR_lock_Context               *lock_context
193);
194
195void _Thread_Restart_self(
196  Thread_Control                 *executing,
197  const Thread_Entry_information *entry,
198  ISR_lock_Context               *lock_context
199) RTEMS_NO_RETURN;
200
201bool _Thread_Restart_other(
202  Thread_Control                 *the_thread,
203  const Thread_Entry_information *entry,
204  ISR_lock_Context               *lock_context
205);
206
207void _Thread_Yield( Thread_Control *executing );
208
209Thread_Life_state _Thread_Change_life(
210  Thread_Life_state clear,
211  Thread_Life_state set,
212  Thread_Life_state ignore
213);
214
215Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
216
217/**
218 * @brief Kills all zombie threads in the system.
219 *
220 * Threads change into the zombie state as the last step in the thread
221 * termination sequence right before a context switch to the heir thread is
222 * initiated.  Since the thread stack is still in use during this phase we have
223 * to postpone the thread stack reclamation until this point.  On SMP
224 * configurations we may have to busy wait for context switch completion here.
225 */
226void _Thread_Kill_zombies( void );
227
228void _Thread_Exit(
229  Thread_Control    *executing,
230  Thread_Life_state  set,
231  void              *exit_value
232);
233
234void _Thread_Join(
235  Thread_Control       *the_thread,
236  States_Control        waiting_for_join,
237  Thread_Control       *executing,
238  Thread_queue_Context *queue_context
239);
240
241void _Thread_Cancel(
242  Thread_Control *the_thread,
243  Thread_Control *executing,
244  void           *exit_value
245);
246
247/**
248 * @brief Closes the thread.
249 *
250 * Closes the thread object and starts the thread termination sequence.  In
251 * case the executing thread is not terminated, then this function waits until
252 * the terminating thread has reached the zombie state.
253 */
254void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
255
256RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
257{
258  return _States_Is_ready( the_thread->current_state );
259}
260
261States_Control _Thread_Clear_state_locked(
262  Thread_Control *the_thread,
263  States_Control  state
264);
265
266/**
267 * @brief Clears the specified thread state.
268 *
269 * In case the previous state is a non-ready state and the next state is the
270 * ready state, then the thread is unblocked by the scheduler.
271 *
272 * @param[in] the_thread The thread.
273 * @param[in] state The state to clear.  It must not be zero.
274 *
275 * @return The previous state.
276 */
277States_Control _Thread_Clear_state(
278  Thread_Control *the_thread,
279  States_Control  state
280);
281
282States_Control _Thread_Set_state_locked(
283  Thread_Control *the_thread,
284  States_Control  state
285);
286
287/**
288 * @brief Sets the specified thread state.
289 *
290 * In case the previous state is the ready state, then the thread is blocked by
291 * the scheduler.
292 *
293 * @param[in] the_thread The thread.
294 * @param[in] state The state to set.  It must not be zero.
295 *
296 * @return The previous state.
297 */
298States_Control _Thread_Set_state(
299  Thread_Control *the_thread,
300  States_Control  state
301);
302
303/**
304 *  @brief Initializes the environment for a thread.
305 *
306 *  This routine initializes the context of @a the_thread to its
307 *  appropriate starting state.
308 *
309 *  @param[in] the_thread is the pointer to the thread control block.
310 */
311void _Thread_Load_environment(
312  Thread_Control *the_thread
313);
314
315void _Thread_Entry_adaptor_idle( Thread_Control *executing );
316
317void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
318
319void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
320
321/**
322 *  @brief Wrapper function for all threads.
323 *
324 *  This routine is the wrapper function for all threads.  It is
325 *  the starting point for all threads.  The user provided thread
326 *  entry point is invoked by this routine.  Operations
327 *  which must be performed immediately before and after the user's
328 *  thread executes are found here.
329 *
330 *  @note On entry, it is assumed all interrupts are blocked and that this
331 *  routine needs to set the initial isr level.  This may or may not
332 *  actually be needed by the context switch routine and as a result
333 *  interrupts may already be at their proper level.  Either way,
334 *  setting the initial isr level properly here is safe.
335 */
336void _Thread_Handler( void );
337
338/**
339 * @brief Executes the global constructors and then restarts itself as the
340 * first initialization thread.
341 *
342 * The first initialization thread is the first RTEMS initialization task or
343 * the first POSIX initialization thread in case no RTEMS initialization tasks
344 * are present.
345 */
346void _Thread_Global_construction(
347  Thread_Control                 *executing,
348  const Thread_Entry_information *entry
349) RTEMS_NO_RETURN;
350
351/**
352 *  @brief Ends the delay of a thread.
353 *
354 *  This routine is invoked when a thread must be unblocked at the
355 *  end of a time based delay (i.e. wake after or wake when).
356 *  It is called by the watchdog handler.
357 *
358 *  @param[in] id is the thread id
359 *  @param[in] ignored is not used
360 */
361void _Thread_Delay_ended(
362  Objects_Id  id,
363  void       *ignored
364);
365
366RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
367  Thread_Control   *the_thread,
368  ISR_lock_Context *lock_context
369)
370{
371  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
372}
373
374RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
375  Thread_Control   *the_thread,
376  ISR_lock_Context *lock_context
377)
378{
379  _ISR_lock_ISR_disable( lock_context );
380  _Thread_State_acquire_critical( the_thread, lock_context );
381}
382
383RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
384  ISR_lock_Context *lock_context
385)
386{
387  Thread_Control *executing;
388
389  _ISR_lock_ISR_disable( lock_context );
390  executing = _Thread_Executing;
391  _Thread_State_acquire_critical( executing, lock_context );
392
393  return executing;
394}
395
396RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
397  Thread_Control   *the_thread,
398  ISR_lock_Context *lock_context
399)
400{
401  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
402}
403
404RTEMS_INLINE_ROUTINE void _Thread_State_release(
405  Thread_Control   *the_thread,
406  ISR_lock_Context *lock_context
407)
408{
409  _Thread_State_release_critical( the_thread, lock_context );
410  _ISR_lock_ISR_enable( lock_context );
411}
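
/*
 * Usage sketch (illustrative only): state changes via the *_locked variants
 * declared above must be bracketed by the thread state lock acquire and
 * release operations.
 *
 * @code
 * ISR_lock_Context lock_context;
 *
 * _Thread_State_acquire( the_thread, &lock_context );
 * _Thread_Clear_state_locked( the_thread, STATES_SUSPENDED );
 * _Thread_State_release( the_thread, &lock_context );
 * @endcode
 */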
412
413#if defined(RTEMS_DEBUG)
414RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
415  const Thread_Control *the_thread
416)
417{
418  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
419}
420#endif
421
422/**
423 * @brief Returns true if the left thread priority is less than the right
424 * thread priority in the intuitive sense of priority and false otherwise.
425 */
426RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
427  Priority_Control left,
428  Priority_Control right
429)
430{
431  return left > right;
432}
433
434/**
435 * @brief Returns the highest priority of the left and right thread priorities
436 * in the intuitive sense of priority.
437 */
438RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
439  Priority_Control left,
440  Priority_Control right
441)
442{
443  return _Thread_Priority_less_than( left, right ) ? right : left;
444}
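
/*
 * Worked example (follows directly from the two functions above): with this
 * encoding a numerically smaller value is the higher priority, so
 * _Thread_Priority_less_than( 1, 2 ) is false and
 * _Thread_Priority_highest( 1, 2 ) yields 1.
 */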
445
446/**
447 * @brief Filters a thread priority change.
448 *
449 * Called by _Thread_Change_priority() under the protection of the thread lock.
450 *
451 * @param[in] the_thread The thread.
452 * @param[in, out] new_priority The new priority of the thread.  The filter may
453 * alter this value.
454 * @param[in] arg The argument passed to _Thread_Change_priority().
455 *
456 * @retval true Change the current priority.
457 * @retval false Otherwise.
458 */
459typedef bool ( *Thread_Change_priority_filter )(
460  Thread_Control   *the_thread,
461  Priority_Control *new_priority,
462  void             *arg
463);
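
/*
 * Sketch of a possible filter (illustrative only, not taken from the actual
 * implementation): it accepts the change only if the new priority is higher
 * than the current priority of the thread.
 *
 * @code
 * static bool _Example_Raise_only_filter(
 *   Thread_Control   *the_thread,
 *   Priority_Control *new_priority,
 *   void             *arg
 * )
 * {
 *   (void) arg;
 *
 *   return _Thread_Priority_less_than(
 *     the_thread->current_priority,
 *     *new_priority
 *   );
 * }
 * @endcode
 */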
464
465Thread_Control *_Thread_Apply_priority(
466  Thread_Control                *the_thread,
467  Priority_Control               new_priority,
468  void                          *arg,
469  Thread_Change_priority_filter  filter,
470  bool                           prepend_it
471);
472
473void _Thread_Update_priority( Thread_Control *the_thread );
474
475/**
476 * @brief Changes the priority of a thread if allowed by the filter function.
477 *
478 * It changes the current priority of the thread to the new priority in case the
479 * filter function returns true.  In this case the scheduler is notified of the
480 * priority change as well.
481 *
482 * @param[in] the_thread The thread.
483 * @param[in] new_priority The new priority of the thread.
484 * @param[in] arg The argument for the filter function.
485 * @param[in] filter The filter function to determine if a priority change is
486 * allowed and optionally perform other actions under the protection of the
487 * thread lock simultaneously with the update of the current priority.
488 * @param[in] prepend_it In case this is true, then the thread is prepended to
489 * its priority group in its scheduler instance, otherwise it is appended.
490 */
491void _Thread_Change_priority(
492  Thread_Control                *the_thread,
493  Priority_Control               new_priority,
494  void                          *arg,
495  Thread_Change_priority_filter  filter,
496  bool                           prepend_it
497);
498
499/**
500 * @brief Raises the priority of a thread.
501 *
502 * It changes the current priority of the thread to the new priority if the new
503 * priority is higher than the current priority.  In this case the thread is
504 * appended to its new priority group in its scheduler instance.
505 *
506 * @param[in] the_thread The thread.
507 * @param[in] new_priority The new priority of the thread.
508 *
509 * @see _Thread_Change_priority().
510 */
511void _Thread_Raise_priority(
512  Thread_Control   *the_thread,
513  Priority_Control  new_priority
514);
515
516/**
517 * @brief Sets the current priority of a thread to its real priority.
518 *
519 * Sets the priority restore hint to false.
520 */
521void _Thread_Restore_priority( Thread_Control *the_thread );
522
523/**
524 * @brief Sets the priority of a thread.
525 *
526 * It sets the real priority of the thread.  In addition it changes the current
527 * priority of the thread if the new priority is higher than the current
528 * priority or the thread owns no resources.
529 *
530 * @param[in] the_thread The thread.
531 * @param[in] new_priority The new priority of the thread.
532 * @param[out] old_priority The old real priority of the thread.  This pointer
533 * must not be @c NULL.
534 * @param[in] prepend_it In case this is true, then the thread is prepended to
535 * its priority group in its scheduler instance, otherwise it is appended.
536 *
537 * @see _Thread_Change_priority().
538 */
539void _Thread_Set_priority(
540  Thread_Control   *the_thread,
541  Priority_Control  new_priority,
542  Priority_Control *old_priority,
543  bool              prepend_it
544);
545
546RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
547  Objects_Id id
548)
549{
550  uint32_t the_api;
551
552  the_api = _Objects_Get_API( id );
553
554  if ( !_Objects_Is_api_valid( the_api ) ) {
555    return NULL;
556  }
557
558  /*
559   * Threads are always first class :)
560   *
561   * There is no need to validate the object class of the object identifier,
562   * since this will be done by the object get methods.
563   */
564  return _Objects_Information_table[ the_api ][ 1 ];
565}
566
567/**
568 * @brief Gets a thread by its identifier.
569 *
570 * @see _Objects_Get().
571 */
572Thread_Control *_Thread_Get(
573  Objects_Id         id,
574  ISR_lock_Context  *lock_context
575);
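
/*
 * Usage sketch (illustrative, simplified error handling): on success the
 * caller holds the thread with interrupts disabled via the lock context and
 * is responsible for enabling them again, for example through the thread
 * state release operation.
 *
 * @code
 * ISR_lock_Context  lock_context;
 * Thread_Control   *the_thread;
 * Per_CPU_Control  *cpu;
 *
 * the_thread = _Thread_Get( id, &lock_context );
 *
 * if ( the_thread != NULL ) {
 *   _Thread_State_acquire_critical( the_thread, &lock_context );
 *   cpu = _Thread_Get_CPU( the_thread );
 *   _Thread_State_release( the_thread, &lock_context );
 * }
 * @endcode
 */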
576
577RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
578  const Thread_Control *thread
579)
580{
581#if defined(RTEMS_SMP)
582  return thread->Scheduler.cpu;
583#else
584  (void) thread;
585
586  return _Per_CPU_Get();
587#endif
588}
589
590RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
591  Thread_Control *thread,
592  Per_CPU_Control *cpu
593)
594{
595#if defined(RTEMS_SMP)
596  thread->Scheduler.cpu = cpu;
597#else
598  (void) thread;
599  (void) cpu;
600#endif
601}
602
603/**
604 * This function returns true if the_thread is the currently executing
605 * thread, and false otherwise.
606 */
607
608RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
609  const Thread_Control *the_thread
610)
611{
612  return ( the_thread == _Thread_Executing );
613}
614
615#if defined(RTEMS_SMP)
616/**
617 * @brief Returns @a true in case the thread currently executes on some
618 * processor in the system, otherwise @a false.
619 *
620 * Do not confuse this with _Thread_Is_executing() which checks only the
621 * current processor.
622 */
623RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
624  const Thread_Control *the_thread
625)
626{
627  return _CPU_Context_Get_is_executing( &the_thread->Registers );
628}
629#endif
630
631/**
632 * This function returns true if the_thread is the heir
633 * thread, and false otherwise.
634 */
635
636RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
637  const Thread_Control *the_thread
638)
639{
640  return ( the_thread == _Thread_Heir );
641}
642
643/**
644 * This routine clears any blocking state for the_thread.  It performs
645 * any necessary scheduling operations including the selection of
646 * a new heir thread.
647 */
648
649RTEMS_INLINE_ROUTINE void _Thread_Unblock (
650  Thread_Control *the_thread
651)
652{
653  _Thread_Clear_state( the_thread, STATES_BLOCKED );
654}
655
656/**
657 * This function returns true if the floating point context of
658 * the_thread is currently loaded in the floating point unit, and
659 * false otherwise.
660 */
661
662#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
663RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
664  const Thread_Control *the_thread
665)
666{
667  return ( the_thread == _Thread_Allocated_fp );
668}
669#endif
670
671/*
672 *  If the CPU has hardware floating point, then we must address saving
673 *  and restoring it as part of the context switch.
674 *
675 *  The second conditional compilation section selects the algorithm used
676 *  to context switch between floating point tasks.  The deferred algorithm
677 *  can be significantly better in a system with few floating point tasks
678 *  because it reduces the total number of save and restore FP context
679 *  operations.  However, this algorithm can not be used on all CPUs due
680 *  to unpredictable use of FP registers by some compilers for integer
681 *  operations.
682 */
683
684RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
685{
686#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
687#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
688  if ( executing->fp_context != NULL )
689    _Context_Save_fp( &executing->fp_context );
690#endif
691#endif
692}
693
694RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
695{
696#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
697#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
698  if ( (executing->fp_context != NULL) &&
699       !_Thread_Is_allocated_fp( executing ) ) {
700    if ( _Thread_Allocated_fp != NULL )
701      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
702    _Context_Restore_fp( &executing->fp_context );
703    _Thread_Allocated_fp = executing;
704  }
705#else
706  if ( executing->fp_context != NULL )
707    _Context_Restore_fp( &executing->fp_context );
708#endif
709#endif
710}
711
712/**
713 * This routine is invoked when the currently loaded floating
714 * point context is no longer associated with an active thread.
715 */
716
717#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
718RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
719{
720  _Thread_Allocated_fp = NULL;
721}
722#endif
723
724/**
725 * This function returns true if a thread dispatch is necessary, and false
726 * otherwise.
727 */
728
729RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
730{
731  return ( _Thread_Dispatch_necessary );
732}
733
734/**
735 * This function returns true if the_thread is NULL and false otherwise.
736 */
737
738RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
739  const Thread_Control *the_thread
740)
741{
742  return ( the_thread == NULL );
743}
744
745/**
746 * @brief Is proxy blocking.
747 *
748 * Returns true if the code is the status which indicates that a proxy is blocking, and false otherwise.
749 */
750RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
751  uint32_t   code
752)
753{
754  return (code == THREAD_STATUS_PROXY_BLOCKING);
755}
756
757RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
758{
759  /* Idle threads */
760  uint32_t maximum_internal_threads =
761    rtems_configuration_get_maximum_processors();
762
763  /* MPCI thread */
764#if defined(RTEMS_MULTIPROCESSING)
765  if ( _System_state_Is_multiprocessing ) {
766    ++maximum_internal_threads;
767  }
768#endif
769
770  return maximum_internal_threads;
771}
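
/*
 * For example (follows from the function above): four configured processors
 * on a node that is part of a multiprocessing network require 4 idle threads
 * plus 1 MPCI thread, so 5 internal threads in total.
 */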
772
773RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
774{
775  return (Thread_Control *)
776    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
777}
778
779/**
780 * @brief Gets the heir of the processor and makes it executing.
781 *
782 * Must be called with interrupts disabled.  The thread dispatch necessary
783 * indicator is cleared as a side-effect.
784 *
785 * @return The heir thread.
786 *
787 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
788 * _Thread_Dispatch_update_heir().
789 */
790RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
791  Per_CPU_Control *cpu_self
792)
793{
794  Thread_Control *heir;
795
796  heir = cpu_self->heir;
797  cpu_self->dispatch_necessary = false;
798  cpu_self->executing = heir;
799
800  return heir;
801}
802
803RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
804  Thread_Control  *the_thread,
805  Per_CPU_Control *cpu
806)
807{
808  Timestamp_Control last;
809  Timestamp_Control ran;
810
811  last = cpu->cpu_usage_timestamp;
812  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
813  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
814  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
815}
816
817#if defined( RTEMS_SMP )
818RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
819  Per_CPU_Control *cpu_self,
820  Per_CPU_Control *cpu_for_heir,
821  Thread_Control  *heir
822)
823{
824  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );
825
826  cpu_for_heir->heir = heir;
827
828  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
829}
830#endif
831
832void _Thread_Get_CPU_time_used(
833  Thread_Control    *the_thread,
834  Timestamp_Control *cpu_time_used
835);
836
837RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
838  Thread_Action_control *action_control
839)
840{
841  _Chain_Initialize_empty( &action_control->Chain );
842}
843
844RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
845  Thread_Action *action
846)
847{
848  _Chain_Set_off_chain( &action->Node );
849}
850
851RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
852  Thread_Control        *the_thread,
853  Thread_Action         *action,
854  Thread_Action_handler  handler
855)
856{
857  Per_CPU_Control *cpu_of_thread;
858
859  _Assert( _Thread_State_is_owner( the_thread ) );
860
861  cpu_of_thread = _Thread_Get_CPU( the_thread );
862
863  action->handler = handler;
864
865  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );
866
867  _Chain_Append_if_is_off_chain_unprotected(
868    &the_thread->Post_switch_actions.Chain,
869    &action->Node
870  );
871}
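
/*
 * Sketch (illustrative; the action object and handler are hypothetical):
 * request that a handler runs on the thread's processor during the next
 * thread dispatch.  The caller must own the thread state lock, see the
 * assertion above.
 *
 * @code
 * _Thread_State_acquire( the_thread, &lock_context );
 * _Thread_Add_post_switch_action( the_thread, &action, handler );
 * _Thread_State_release( the_thread, &lock_context );
 * @endcode
 */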
872
873RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
874  Thread_Life_state life_state
875)
876{
877  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
878}
879
880RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
881  Thread_Life_state life_state
882)
883{
884  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
885}
886
887RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
888  Thread_Life_state life_state
889)
890{
891  return ( life_state
892    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
893}
894
895RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
896  Thread_Life_state life_state
897)
898{
899  return ( life_state
900    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
901}
902
903RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
904  const Thread_Control *the_thread
905)
906{
907  _Assert( _Thread_State_is_owner( the_thread ) );
908  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
909}
910
911/**
912 * @brief Returns true if the thread owns resources, and false otherwise.
913 *
914 * Resources are accounted with the Thread_Control::resource_count resource
915 * counter.  This counter is used by semaphore objects for example.
916 *
917 * In addition to the resource counter there is a resource dependency tree
918 * available on SMP configurations.  In case this tree is non-empty, then the
919 * thread owns resources.
920 *
921 * @param[in] the_thread The thread.
922 */
923RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
924  const Thread_Control *the_thread
925)
926{
927  bool owns_resources = the_thread->resource_count != 0;
928
929#if defined(RTEMS_SMP)
930  owns_resources = owns_resources
931    || _Resource_Node_owns_resources( &the_thread->Resource_node );
932#endif
933
934  return owns_resources;
935}
936
937/**
938 * @brief Acquires the thread wait default lock inside a critical section
939 * (interrupts disabled).
940 *
941 * @param[in] the_thread The thread.
942 * @param[in] lock_context The lock context used for the corresponding lock
943 *   release.
944 *
945 * @see _Thread_Wait_release_default_critical().
946 */
947RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
948  Thread_Control   *the_thread,
949  ISR_lock_Context *lock_context
950)
951{
952  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
953}
954
955/**
956 * @brief Acquires the thread wait default lock and returns the executing
957 * thread.
958 *
959 * @param[in] lock_context The lock context used for the corresponding lock
960 *   release.
961 *
962 * @return The executing thread.
963 *
964 * @see _Thread_Wait_release_default().
965 */
966RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
967  ISR_lock_Context *lock_context
968)
969{
970  Thread_Control *executing;
971
972  _ISR_lock_ISR_disable( lock_context );
973  executing = _Thread_Executing;
974  _Thread_Wait_acquire_default_critical( executing, lock_context );
975
976  return executing;
977}
978
979/**
980 * @brief Acquires the thread wait default lock and disables interrupts.
981 *
982 * @param[in] the_thread The thread.
983 * @param[in] lock_context The lock context used for the corresponding lock
984 *   release.
985 *
986 * @see _Thread_Wait_release_default().
987 */
988RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
989  Thread_Control   *the_thread,
990  ISR_lock_Context *lock_context
991)
992{
993  _ISR_lock_ISR_disable( lock_context );
994  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
995}
996
997/**
998 * @brief Releases the thread wait default lock inside a critical section
999 * (interrupts disabled).
1000 *
1001 * The previous interrupt status is not restored.
1002 *
1003 * @param[in] the_thread The thread.
1004 * @param[in] lock_context The lock context used for the corresponding lock
1005 *   acquire.
1006 */
1007RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
1008  Thread_Control   *the_thread,
1009  ISR_lock_Context *lock_context
1010)
1011{
1012  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1013}
1014
1015/**
1016 * @brief Releases the thread wait default lock and restores the previous
1017 * interrupt status.
1018 *
1019 * @param[in] the_thread The thread.
1020 * @param[in] lock_context The lock context used for the corresponding lock
1021 *   acquire.
1022 */
1023RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
1024  Thread_Control   *the_thread,
1025  ISR_lock_Context *lock_context
1026)
1027{
1028  _Thread_Wait_release_default_critical( the_thread, lock_context );
1029  _ISR_lock_ISR_enable( lock_context );
1030}
1031
1032#if defined(RTEMS_SMP)
1033#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1034  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1035
1036RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1037  Thread_Control            *the_thread,
1038  Thread_queue_Lock_context *queue_lock_context
1039)
1040{
1041  Chain_Node *first;
1042
1043  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1044  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1045
1046  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1047    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1048  }
1049}
1050
1051RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
1052  Thread_queue_Queue        *queue,
1053  Thread_queue_Lock_context *queue_lock_context
1054)
1055{
1056  _Thread_queue_Queue_acquire_critical(
1057    queue,
1058    &_Thread_Executing->Potpourri_stats,
1059    &queue_lock_context->Lock_context
1060  );
1061}
1062
1063RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
1064  Thread_queue_Queue        *queue,
1065  Thread_queue_Lock_context *queue_lock_context
1066)
1067{
1068  _Thread_queue_Queue_release_critical(
1069    queue,
1070    &queue_lock_context->Lock_context
1071  );
1072}
1073#endif
1074
1075/**
1076 * @brief Acquires the thread wait lock inside a critical section (interrupts
1077 * disabled).
1078 *
1079 * @param[in] the_thread The thread.
1080 * @param[in] queue_context The thread queue context for the corresponding
1081 *   _Thread_Wait_release_critical().
1082 *
1083 * @see _Thread_queue_Context_initialize().
1084 */
1085RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
1086  Thread_Control       *the_thread,
1087  Thread_queue_Context *queue_context
1088)
1089{
1090#if defined(RTEMS_SMP)
1091  Thread_queue_Queue *queue;
1092
1093  _Thread_Wait_acquire_default_critical(
1094    the_thread,
1095    &queue_context->Lock_context.Lock_context
1096  );
1097
1098  queue = the_thread->Wait.queue;
1099  queue_context->Lock_context.Wait.queue = queue;
1100
1101  if ( queue != NULL ) {
1102    _Thread_queue_Gate_add(
1103      &the_thread->Wait.Lock.Pending_requests,
1104      &queue_context->Lock_context.Wait.Gate
1105    );
1106    _Thread_Wait_release_default_critical(
1107      the_thread,
1108      &queue_context->Lock_context.Lock_context
1109    );
1110    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1111
1112    if ( queue_context->Lock_context.Wait.queue == NULL ) {
1113      _Thread_Wait_release_queue_critical(
1114        queue,
1115        &queue_context->Lock_context
1116      );
1117      _Thread_Wait_acquire_default_critical(
1118        the_thread,
1119        &queue_context->Lock_context.Lock_context
1120      );
1121      _Thread_Wait_remove_request_locked(
1122        the_thread,
1123        &queue_context->Lock_context
1124      );
1125      _Assert( the_thread->Wait.queue == NULL );
1126    }
1127  }
1128#else
1129  (void) the_thread;
1130  (void) queue_context;
1131#endif
1132}
1133
1134/**
1135 * @brief Acquires the thread wait lock and disables interrupts.
1136 *
1137 * @param[in] the_thread The thread.
1138 * @param[in] queue_context The thread queue context for the corresponding
1139 *   _Thread_Wait_release().
1140 */
1141RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
1142  Thread_Control       *the_thread,
1143  Thread_queue_Context *queue_context
1144)
1145{
1146  _Thread_queue_Context_initialize( queue_context );
1147  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
1148  _Thread_Wait_acquire_critical( the_thread, queue_context );
1149}
1150
1151/**
1152 * @brief Releases the thread wait lock inside a critical section (interrupts
1153 * disabled).
1154 *
1155 * The previous interrupt status is not restored.
1156 *
1157 * @param[in] the_thread The thread.
1158 * @param[in] queue_context The thread queue context used for corresponding
1159 *   _Thread_Wait_acquire_critical().
1160 */
1161RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
1162  Thread_Control       *the_thread,
1163  Thread_queue_Context *queue_context
1164)
1165{
1166#if defined(RTEMS_SMP)
1167  Thread_queue_Queue *queue;
1168
1169  queue = queue_context->Lock_context.Wait.queue;
1170
1171  if ( queue != NULL ) {
1172    _Thread_Wait_release_queue_critical(
1173      queue, &queue_context->Lock_context
1174    );
1175    _Thread_Wait_acquire_default_critical(
1176      the_thread,
1177      &queue_context->Lock_context.Lock_context
1178    );
1179    _Thread_Wait_remove_request_locked(
1180      the_thread,
1181      &queue_context->Lock_context
1182    );
1183  }
1184
1185  _Thread_Wait_release_default_critical(
1186    the_thread,
1187    &queue_context->Lock_context.Lock_context
1188  );
1189#else
1190  (void) the_thread;
1191  (void) queue_context;
1192#endif
1193}
1194
1195/**
1196 * @brief Releases the thread wait lock and restores the previous interrupt
1197 * status.
1198 *
1199 * @param[in] the_thread The thread.
1200 * @param[in] queue_context The thread queue context used for corresponding
1201 *   _Thread_Wait_acquire().
1202 */
1203RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
1204  Thread_Control       *the_thread,
1205  Thread_queue_Context *queue_context
1206)
1207{
1208  _Thread_Wait_release_critical( the_thread, queue_context );
1209  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
1210}
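
/*
 * Usage sketch (illustrative): read the thread wait state consistently under
 * the thread wait lock.
 *
 * @code
 * Thread_queue_Context queue_context;
 * Thread_queue_Queue  *queue;
 *
 * _Thread_Wait_acquire( the_thread, &queue_context );
 * queue = the_thread->Wait.queue;
 * _Thread_Wait_release( the_thread, &queue_context );
 * @endcode
 */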
1211
1212/**
1213 * @brief Claims the thread wait queue and operations.
1214 *
1215 * The caller must not be the owner of the default thread wait lock.  The
1216 * caller must be the owner of the corresponding thread queue lock.
1217 *
1218 * @param[in] the_thread The thread.
1219 * @param[in] queue The new thread queue.
1220 * @param[in] operations The new thread operations.
1221 *
1222 * @see _Thread_Wait_restore_default().
1223 */
1224RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
1225  Thread_Control                *the_thread,
1226  Thread_queue_Queue            *queue,
1227  const Thread_queue_Operations *operations
1228)
1229{
1230  ISR_lock_Context lock_context;
1231
1232  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1233
1234  _Assert( the_thread->Wait.queue == NULL );
1235
1236#if defined(RTEMS_SMP)
1237  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
1238  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
1239  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
1240#endif
1241
1242  the_thread->Wait.queue = queue;
1243  the_thread->Wait.operations = operations;
1244
1245  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1246}
1247
1248/**
1249 * @brief Removes a thread wait lock request.
1250 *
1251 * On SMP configurations, removes a thread wait lock request.
1252 *
1253 * On other configurations, this function does nothing.
1254 *
1255 * @param[in] the_thread The thread.
1256 * @param[in] queue_lock_context The thread queue lock context used for
1257 *   corresponding _Thread_Wait_acquire().
1258 */
1259RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
1260  Thread_Control            *the_thread,
1261  Thread_queue_Lock_context *queue_lock_context
1262)
1263{
1264#if defined(RTEMS_SMP)
1265  ISR_lock_Context lock_context;
1266
1267  _Thread_Wait_acquire_default( the_thread, &lock_context );
1268  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
1269  _Thread_Wait_release_default( the_thread, &lock_context );
1270#else
1271  (void) the_thread;
1272  (void) queue_lock_context;
1273#endif
1274}
1275
1276/**
1277 * @brief Restores the default thread wait queue and operations.
1278 *
1279 * The caller must be the owner of the current thread wait queue lock.
1280 *
1281 * On SMP configurations, the pending requests are updated to use the stale
1282 * thread queue operations.
1283 *
1284 * @param[in] the_thread The thread.
1285 *
1286 * @see _Thread_Wait_claim().
1287 */
1288RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
1289  Thread_Control *the_thread
1290)
1291{
1292#if defined(RTEMS_SMP)
1293  ISR_lock_Context  lock_context;
1294  Chain_Node       *node;
1295  const Chain_Node *tail;
1296
1297  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1298
1299  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1300  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
1301
1302  if ( node != tail ) {
1303    do {
1304      Thread_queue_Context *queue_context;
1305
1306      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
1307      queue_context->Lock_context.Wait.queue = NULL;
1308
1309      node = _Chain_Next( node );
1310    } while ( node != tail );
1311
1312    _Thread_queue_Gate_add(
1313      &the_thread->Wait.Lock.Pending_requests,
1314      &the_thread->Wait.Lock.Tranquilizer
1315    );
1316  } else {
1317    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
1318  }
1319#endif
1320
1321  the_thread->Wait.queue = NULL;
1322  the_thread->Wait.operations = &_Thread_queue_Operations_default;
1323
1324#if defined(RTEMS_SMP)
1325  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1326#endif
1327}
1328
1329/**
1330 * @brief Tranquilizes the thread after a wait on a thread queue.
1331 *
1332 * After the violent blocking procedure this function makes the thread calm and
1333 * peaceful again so that it can carry out its normal work.
1334 *
1335 * On SMP configurations, ensures that all pending thread wait lock requests
1336 * have completed before the thread is able to begin a new thread wait procedure.
1337 *
1338 * On other configurations, this function does nothing.
1339 *
1340 * It must be called after a _Thread_Wait_claim() exactly once
1341 *  - after the corresponding thread queue lock was released, and
1342 *  - the default wait state is restored or some other processor is about to do
1343 *    this.
1344 *
1345 * @param[in] the_thread The thread.
1346 */
1347RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
1348  Thread_Control *the_thread
1349)
1350{
1351#if defined(RTEMS_SMP)
1352  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
1353#else
1354  (void) the_thread;
1355#endif
1356}
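
/*
 * Condensed sketch (illustrative, not the actual thread queue implementation)
 * of the claim, restore and tranquilize sequence described above; the steps
 * in parentheses stand for thread queue internals omitted here.
 *
 * @code
 * (acquire the thread queue lock)
 * _Thread_Wait_claim( the_thread, queue, operations );
 * (enqueue and block the thread, later reacquire the thread queue lock)
 * _Thread_Wait_restore_default( the_thread );
 * (release the thread queue lock)
 * _Thread_Wait_tranquilize( the_thread );
 * @endcode
 */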
1357
1358/**
1359 * @brief Cancels a thread wait on a thread queue.
1360 *
1361 * @param[in] the_thread The thread.
1362 * @param[in] queue_context The thread queue context used for corresponding
1363 *   _Thread_Wait_acquire().
1364 */
1365RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
1366  Thread_Control       *the_thread,
1367  Thread_queue_Context *queue_context
1368)
1369{
1370  Thread_queue_Queue *queue;
1371
1372  queue = the_thread->Wait.queue;
1373
1374#if defined(RTEMS_SMP)
1375  if ( queue != NULL ) {
1376    _Assert( queue_context->Lock_context.Wait.queue == queue );
1377#endif
1378
1379    ( *the_thread->Wait.operations->extract )( queue, the_thread );
1380    _Thread_Wait_restore_default( the_thread );
1381
1382#if defined(RTEMS_SMP)
1383    _Assert( queue_context->Lock_context.Wait.queue == NULL );
1384    queue_context->Lock_context.Wait.queue = queue;
1385  }
1386#endif
1387}
1388
1389/**
1390 * @brief The initial thread wait flags value set by _Thread_Initialize().
1391 */
1392#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1393
1394/**
1395 * @brief Mask to get the thread wait state flags.
1396 */
1397#define THREAD_WAIT_STATE_MASK 0xffU
1398
1399/**
1400 * @brief Indicates that the thread has begun the blocking operation.
1401 *
1402 * A blocking operation consists of an optional watchdog initialization and the
1403 * setting of the appropriate thread blocking state with the corresponding
1404 * scheduler block operation.
1405 */
1406#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1407
1408/**
1409 * @brief Indicates that the thread completed the blocking operation.
1410 */
1411#define THREAD_WAIT_STATE_BLOCKED 0x2U
1412
1413/**
1414 * @brief Indicates that a condition to end the thread wait occurred.
1415 *
1416 * This could be a timeout, a signal, an event or a resource availability.
1417 */
1418#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1419
1420/**
1421 * @brief Mask to get the thread wait class flags.
1422 */
1423#define THREAD_WAIT_CLASS_MASK 0xff00U
1424
1425/**
1426 * @brief Indicates that the thread waits for an event.
1427 */
1428#define THREAD_WAIT_CLASS_EVENT 0x100U
1429
1430/**
1431 * @brief Indicates that the thread waits for a system event.
1432 */
1433#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1434
1435/**
1436 * @brief Indicates that the thread waits for an object.
1437 */
1438#define THREAD_WAIT_CLASS_OBJECT 0x400U
1439
1440/**
1441 * @brief Indicates that the thread waits for a period.
1442 */
1443#define THREAD_WAIT_CLASS_PERIOD 0x800U
1444
1445RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
1446  Thread_Control    *the_thread,
1447  Thread_Wait_flags  flags
1448)
1449{
1450#if defined(RTEMS_SMP)
1451  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
1452#else
1453  the_thread->Wait.flags = flags;
1454#endif
1455}
1456
1457RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
1458  const Thread_Control *the_thread
1459)
1460{
1461#if defined(RTEMS_SMP)
1462  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
1463#else
1464  return the_thread->Wait.flags;
1465#endif
1466}
1467
1468/**
1469 * @brief Tries to change the thread wait flags with release semantics in case
1470 * of success.
1471 *
1472 * Must be called inside a critical section (interrupts disabled).
1473 *
1474 * In case the wait flags are equal to the expected wait flags, then the wait
1475 * flags are set to the desired wait flags.
1476 *
1477 * @param[in] the_thread The thread.
1478 * @param[in] expected_flags The expected wait flags.
1479 * @param[in] desired_flags The desired wait flags.
1480 *
1481 * @retval true The wait flags were equal to the expected wait flags.
1482 * @retval false Otherwise.
1483 */
1484RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
1485  Thread_Control    *the_thread,
1486  Thread_Wait_flags  expected_flags,
1487  Thread_Wait_flags  desired_flags
1488)
1489{
1490  _Assert( _ISR_Get_level() != 0 );
1491
1492#if defined(RTEMS_SMP)
1493  return _Atomic_Compare_exchange_uint(
1494    &the_thread->Wait.flags,
1495    &expected_flags,
1496    desired_flags,
1497    ATOMIC_ORDER_RELEASE,
1498    ATOMIC_ORDER_RELAXED
1499  );
1500#else
1501  bool success = ( the_thread->Wait.flags == expected_flags );
1502
1503  if ( success ) {
1504    the_thread->Wait.flags = desired_flags;
1505  }
1506
1507  return success;
1508#endif
1509}
1510
1511/**
1512 * @brief Tries to change the thread wait flags with acquire semantics.
1513 *
1514 * In case the wait flags are equal to the expected wait flags, then the wait
1515 * flags are set to the desired wait flags.
1516 *
1517 * @param[in] the_thread The thread.
1518 * @param[in] expected_flags The expected wait flags.
1519 * @param[in] desired_flags The desired wait flags.
1520 *
1521 * @retval true The wait flags were equal to the expected wait flags.
1522 * @retval false Otherwise.
1523 */
1524RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
1525  Thread_Control    *the_thread,
1526  Thread_Wait_flags  expected_flags,
1527  Thread_Wait_flags  desired_flags
1528)
1529{
1530  bool success;
1531#if defined(RTEMS_SMP)
1532  return _Atomic_Compare_exchange_uint(
1533    &the_thread->Wait.flags,
1534    &expected_flags,
1535    desired_flags,
1536    ATOMIC_ORDER_ACQUIRE,
1537    ATOMIC_ORDER_ACQUIRE
1538  );
1539#else
1540  ISR_Level level;
1541
1542  _ISR_Local_disable( level );
1543
1544  success = _Thread_Wait_flags_try_change_release(
1545    the_thread,
1546    expected_flags,
1547    desired_flags
1548  );
1549
1550  _ISR_Local_enable( level );
1551#endif
1552
1553  return success;
1554}
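
/*
 * Plausible shape of the wait flags handshake (a sketch under assumptions,
 * not copied from the thread queue code): the blocking thread announces its
 * intent, and a waking side that observes this intent tries to switch the
 * flags to ready again before the blocking operation completes.
 *
 * @code
 * (blocking side, interrupts disabled)
 * _Thread_Wait_flags_set(
 *   executing,
 *   THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
 * );
 *
 * (waking side, inside the critical section)
 * unblocked = _Thread_Wait_flags_try_change_release(
 *   the_thread,
 *   THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
 *   THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN
 * );
 * @endcode
 */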
1555
1556/**
1557 * @brief Returns the object identifier of the object containing the current
1558 * thread wait queue.
1559 *
1560 * This function may be used for debug and system information purposes.  The
1561 * caller must be the owner of the thread lock.
1562 *
1563 * @retval 0 The thread waits on no thread queue currently, the thread wait
1564 *   queue is not contained in an object, or the current thread state provides
1565 *   insufficient information, e.g. the thread is in the middle of a blocking
1566 *   operation.
1567 * @retval other The object identifier of the object containing the thread wait
1568 *   queue.
1569 */
1570Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1571
1572RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
1573  const Thread_Control *the_thread
1574)
1575{
1576  return (Status_Control) the_thread->Wait.return_code;
1577}
1578
1579/**
1580 * @brief General purpose thread wait timeout.
1581 *
1582 * @param[in] watchdog The thread timer watchdog.
1583 */
1584void _Thread_Timeout( Watchdog_Control *watchdog );
1585
1586RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
1587  Thread_Timer_information *timer,
1588  Per_CPU_Control          *cpu
1589)
1590{
1591  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
1592  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
1593  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
1594}
1595
1596RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
1597  Thread_Control                 *the_thread,
1598  Per_CPU_Control                *cpu,
1599  Watchdog_Service_routine_entry  routine,
1600  Watchdog_Interval               ticks
1601)
1602{
1603  ISR_lock_Context lock_context;
1604
1605  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
1606
1607  the_thread->Timer.header =
1608    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
1609  the_thread->Timer.Watchdog.routine = routine;
1610  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );
1611
1612  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
1613}
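
/*
 * Usage sketch (illustrative): arm the thread timer so that the general
 * purpose _Thread_Timeout() routine declared above fires after ten clock
 * ticks on the current processor.
 *
 * @code
 * _Thread_Timer_insert_relative(
 *   the_thread,
 *   _Per_CPU_Get(),
 *   _Thread_Timeout,
 *   10
 * );
 * @endcode
 */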
1614
1615RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
1616  Thread_Control                 *the_thread,
1617  Per_CPU_Control                *cpu,
1618  Watchdog_Service_routine_entry  routine,
1619  uint64_t                        expire
1620)
1621{
1622  ISR_lock_Context lock_context;
1623
1624  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
1625
1626  the_thread->Timer.header =
1627    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
1628  the_thread->Timer.Watchdog.routine = routine;
1629  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );
1630
1631  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
1632}
1633
1634RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
1635{
1636  ISR_lock_Context lock_context;
1637
1638  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
1639
1640  _Watchdog_Per_CPU_remove(
1641    &the_thread->Timer.Watchdog,
1642#if defined(RTEMS_SMP)
1643    the_thread->Timer.Watchdog.cpu,
1644#else
1645    _Per_CPU_Get(),
1646#endif
1647    the_thread->Timer.header
1648  );
1649
1650  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
1651}
1652
1653RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
1654  Thread_Control     *the_thread,
1655  Thread_queue_Queue *queue
1656)
1657{
1658  _Thread_Wait_tranquilize( the_thread );
1659  _Thread_Timer_remove( the_thread );
1660
1661#if defined(RTEMS_MULTIPROCESSING)
1662  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
1663    _Thread_Unblock( the_thread );
1664  } else {
1665    _Thread_queue_Unblock_proxy( queue, the_thread );
1666  }
1667#else
1668  (void) queue;
1669  _Thread_Unblock( the_thread );
1670#endif
1671}
1672
1673/** @}*/
1674
1675#ifdef __cplusplus
1676}
1677#endif
1678
1679#if defined(RTEMS_MULTIPROCESSING)
1680#include <rtems/score/threadmp.h>
1681#endif
1682
1683#endif
1684/* end of include file */