source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 7ced9d9b

5
Last change on this file since 7ced9d9b was 7ced9d9b, checked in by Sebastian Huber <sebastian.huber@…>, on 01/12/17 at 08:25:56

score: Add and use _Thread_Get_name()

Update #2858.

  • Property mode set to 100644
File size: 51.5 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2017 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/threadqimpl.h>
35#include <rtems/score/todimpl.h>
36#include <rtems/score/freechain.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44/**
45 * @addtogroup ScoreThread
46 */
47/**@{**/
48
49/**
50 *  Status code used to indicate that a proxy is blocking a thread
51 *  which is waiting for a resource.
52 */
53#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
54
55/**
56 *  Self for the GNU Ada Run-Time
57 */
58extern void *rtems_ada_self;
59
60typedef struct {
61  Objects_Information Objects;
62
63  Freechain_Control Free_thread_queue_heads;
64} Thread_Information;
65
66/**
67 *  The following defines the information control block used to
68 *  manage this class of objects.
69 */
70extern Thread_Information _Thread_Internal_information;
71
72/**
73 *  The following points to the thread whose floating point
74 *  context is currently loaded.
75 */
76#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
77extern Thread_Control *_Thread_Allocated_fp;
78#endif
79
80#if defined(RTEMS_SMP)
81#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
82  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
83#endif
84
85typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
86
87void _Thread_Iterate(
88  Thread_Visitor  visitor,
89  void           *arg
90);
91
92void _Thread_Initialize_information(
93  Thread_Information  *information,
94  Objects_APIs         the_api,
95  uint16_t             the_class,
96  uint32_t             maximum,
97  bool                 is_string,
98  uint32_t             maximum_name_length
99);
100
101/**
102 *  @brief Initialize thread handler.
103 *
104 *  This routine performs the initialization necessary for this handler.
105 */
106void _Thread_Handler_initialization(void);
107
108/**
109 *  @brief Create idle thread.
110 *
111 *  This routine creates the idle thread.
112 *
113 *  @warning No thread should be created before this one.
114 */
115void _Thread_Create_idle(void);
116
117/**
118 *  @brief Start thread multitasking.
119 *
120 *  This routine initiates multitasking.  It is invoked only as
121 *  part of initialization and its invocation is the last act of
122 *  the non-multitasking part of the system initialization.
123 */
124void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
125
126/**
127 *  @brief Allocate the requested stack space for the thread.
128 *
129 *  Allocate the requested stack space for the thread.
130 *  Set the Start.stack field to the address of the stack.
131 *
132 *  @param[in] the_thread is the thread where the stack space is requested
133 *  @param[in] stack_size is the stack space is requested
134 *
135 *  @retval actual size allocated after any adjustment
136 *  @retval zero if the allocation failed
137 */
138size_t _Thread_Stack_Allocate(
139  Thread_Control *the_thread,
140  size_t          stack_size
141);
142
143/**
144 *  @brief Deallocate thread stack.
145 *
146 *  Deallocate the Thread's stack.
147 */
148void _Thread_Stack_Free(
149  Thread_Control *the_thread
150);
151
152/**
153 *  @brief Initialize thread.
154 *
155 *  This routine initializes the specified the thread.  It allocates
156 *  all memory associated with this thread.  It completes by adding
157 *  the thread to the local object table so operations on this
158 *  thread id are allowed.
159 *
160 *  @note If stack_area is NULL, it is allocated from the workspace.
161 *
162 *  @note If the stack is allocated from the workspace, then it is
163 *        guaranteed to be of at least minimum size.
164 */
165bool _Thread_Initialize(
166  Thread_Information                   *information,
167  Thread_Control                       *the_thread,
168  const struct Scheduler_Control       *scheduler,
169  void                                 *stack_area,
170  size_t                                stack_size,
171  bool                                  is_fp,
172  Priority_Control                      priority,
173  bool                                  is_preemptible,
174  Thread_CPU_budget_algorithms          budget_algorithm,
175  Thread_CPU_budget_algorithm_callout   budget_callout,
176  uint32_t                              isr_level,
177  Objects_Name                          name
178);
179
180/**
181 *  @brief Initializes thread and executes it.
182 *
183 *  This routine initializes the executable information for a thread
184 *  and makes it ready to execute.  After this routine executes, the
185 *  thread competes with all other threads for CPU time.
186 *
187 *  @param the_thread The thread to be started.
188 *  @param entry The thread entry information.
189 */
190bool _Thread_Start(
191  Thread_Control                 *the_thread,
192  const Thread_Entry_information *entry,
193  ISR_lock_Context               *lock_context
194);
195
196void _Thread_Restart_self(
197  Thread_Control                 *executing,
198  const Thread_Entry_information *entry,
199  ISR_lock_Context               *lock_context
200) RTEMS_NO_RETURN;
201
202bool _Thread_Restart_other(
203  Thread_Control                 *the_thread,
204  const Thread_Entry_information *entry,
205  ISR_lock_Context               *lock_context
206);
207
208void _Thread_Yield( Thread_Control *executing );
209
210Thread_Life_state _Thread_Change_life(
211  Thread_Life_state clear,
212  Thread_Life_state set,
213  Thread_Life_state ignore
214);
215
216Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
217
218/**
219 * @brief Kills all zombie threads in the system.
220 *
221 * Threads change into the zombie state as the last step in the thread
222 * termination sequence right before a context switch to the heir thread is
223 * initiated.  Since the thread stack is still in use during this phase we have
224 * to postpone the thread stack reclamation until this point.  On SMP
225 * configurations we may have to busy wait for context switch completion here.
226 */
227void _Thread_Kill_zombies( void );
228
229void _Thread_Exit(
230  Thread_Control    *executing,
231  Thread_Life_state  set,
232  void              *exit_value
233);
234
235void _Thread_Join(
236  Thread_Control       *the_thread,
237  States_Control        waiting_for_join,
238  Thread_Control       *executing,
239  Thread_queue_Context *queue_context
240);
241
242void _Thread_Cancel(
243  Thread_Control *the_thread,
244  Thread_Control *executing,
245  void           *exit_value
246);
247
248typedef struct {
249  Thread_queue_Context  Base;
250  Thread_Control       *cancel;
251} Thread_Close_context;
252
253/**
254 * @brief Closes the thread.
255 *
256 * Closes the thread object and starts the thread termination sequence.  In
257 * case the executing thread is not terminated, then this function waits until
258 * the terminating thread reached the zombie state.
259 */
260void _Thread_Close(
261  Thread_Control       *the_thread,
262  Thread_Control       *executing,
263  Thread_Close_context *context
264);
265
266RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
267{
268  return _States_Is_ready( the_thread->current_state );
269}
270
271States_Control _Thread_Clear_state_locked(
272  Thread_Control *the_thread,
273  States_Control  state
274);
275
276/**
277 * @brief Clears the specified thread state.
278 *
279 * In case the previous state is a non-ready state and the next state is the
280 * ready state, then the thread is unblocked by the scheduler.
281 *
282 * @param[in] the_thread The thread.
283 * @param[in] state The state to clear.  It must not be zero.
284 *
285 * @return The previous state.
286 */
287States_Control _Thread_Clear_state(
288  Thread_Control *the_thread,
289  States_Control  state
290);
291
292States_Control _Thread_Set_state_locked(
293  Thread_Control *the_thread,
294  States_Control  state
295);
296
297/**
298 * @brief Sets the specified thread state.
299 *
300 * In case the previous state is the ready state, then the thread is blocked by
301 * the scheduler.
302 *
303 * @param[in] the_thread The thread.
304 * @param[in] state The state to set.  It must not be zero.
305 *
306 * @return The previous state.
307 */
308States_Control _Thread_Set_state(
309  Thread_Control *the_thread,
310  States_Control  state
311);
312
313/**
314 *  @brief Initializes environment for a thread.
315 *
316 *  This routine initializes the context of @a the_thread to its
317 *  appropriate starting state.
318 *
319 *  @param[in] the_thread is the pointer to the thread control block.
320 */
321void _Thread_Load_environment(
322  Thread_Control *the_thread
323);
324
325void _Thread_Entry_adaptor_idle( Thread_Control *executing );
326
327void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
328
329void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
330
331/**
332 *  @brief Wrapper function for all threads.
333 *
334 *  This routine is the wrapper function for all threads.  It is
335 *  the starting point for all threads.  The user provided thread
336 *  entry point is invoked by this routine.  Operations
337 *  which must be performed immediately before and after the user's
338 *  thread executes are found here.
339 *
340 *  @note On entry, it is assumed all interrupts are blocked and that this
341 *  routine needs to set the initial isr level.  This may or may not
342 *  actually be needed by the context switch routine and as a result
343 *  interrupts may already be at their proper level.  Either way,
344 *  setting the initial isr level properly here is safe.
345 */
346void _Thread_Handler( void );
347
348/**
349 * @brief Executes the global constructors and then restarts itself as the
350 * first initialization thread.
351 *
352 * The first initialization thread is the first RTEMS initialization task or
353 * the first POSIX initialization thread in case no RTEMS initialization tasks
354 * are present.
355 */
356void _Thread_Global_construction(
357  Thread_Control                 *executing,
358  const Thread_Entry_information *entry
359) RTEMS_NO_RETURN;
360
361/**
362 *  @brief Ended the delay of a thread.
363 *
364 *  This routine is invoked when a thread must be unblocked at the
365 *  end of a time based delay (i.e. wake after or wake when).
366 *  It is called by the watchdog handler.
367 *
368 *  @param[in] id is the thread id
369 *  @param[in] ignored is not used
370 */
371void _Thread_Delay_ended(
372  Objects_Id  id,
373  void       *ignored
374);
375
/**
 * @brief Acquires the thread state lock within an already established
 * critical section (interrupts disabled).
 *
 * The thread state lock is provided by the join thread queue of the thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
}
383
/**
 * @brief Disables interrupts and acquires the thread state lock.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_State_acquire_critical( the_thread, lock_context );
}
392
/**
 * @brief Disables interrupts, acquires the thread state lock of the
 * executing thread, and returns the executing thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  /* Read the executing thread only after interrupts are disabled */
  executing = _Thread_Executing;
  _Thread_State_acquire_critical( executing, lock_context );

  return executing;
}
405
/**
 * @brief Releases the thread state lock; interrupts remain disabled.
 *
 * Counterpart of _Thread_State_acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
}
413
/**
 * @brief Releases the thread state lock and restores the previous
 * interrupt status.
 *
 * Counterpart of _Thread_State_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_State_release_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
422
423#if defined(RTEMS_DEBUG)
424RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
425  const Thread_Control *the_thread
426)
427{
428  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
429}
430#endif
431
432/**
433 * @brief Performs the priority actions specified by the thread queue context
434 * along the thread queue path.
435 *
436 * The caller must be the owner of the thread wait lock.
437 *
438 * @param start_of_path The start thread of the thread queue path.
439 * @param queue_context The thread queue context specifying the thread queue
440 *   path and initial thread priority actions.
441 *
442 * @see _Thread_queue_Path_acquire_critical().
443 */
444void _Thread_Priority_perform_actions(
445  Thread_Control       *start_of_path,
446  Thread_queue_Context *queue_context
447);
448
449/**
450 * @brief Adds the specified thread priority node to the corresponding thread
451 * priority aggregation.
452 *
453 * The caller must be the owner of the thread wait lock.
454 *
455 * @param the_thread The thread.
456 * @param priority_node The thread priority node to add.
457 * @param queue_context The thread queue context to return an updated set of
458 *   threads for _Thread_Priority_update().  The thread queue context must be
459 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
460 *   call of this function.
461 *
462 * @see _Thread_Wait_acquire().
463 */
464void _Thread_Priority_add(
465  Thread_Control       *the_thread,
466  Priority_Node        *priority_node,
467  Thread_queue_Context *queue_context
468);
469
470/**
471 * @brief Removes the specified thread priority node from the corresponding
472 * thread priority aggregation.
473 *
474 * The caller must be the owner of the thread wait lock.
475 *
476 * @param the_thread The thread.
477 * @param priority_node The thread priority node to remove.
478 * @param queue_context The thread queue context to return an updated set of
479 *   threads for _Thread_Priority_update().  The thread queue context must be
480 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
481 *   call of this function.
482 *
483 * @see _Thread_Wait_acquire().
484 */
485void _Thread_Priority_remove(
486  Thread_Control       *the_thread,
487  Priority_Node        *priority_node,
488  Thread_queue_Context *queue_context
489);
490
491/**
492 * @brief Propagates a thread priority value change in the specified thread
493 * priority node to the corresponding thread priority aggregation.
494 *
495 * The caller must be the owner of the thread wait lock.
496 *
497 * @param the_thread The thread.
498 * @param priority_node The thread priority node to change.
499 * @param prepend_it In case this is true, then the thread is prepended to
500 *   its priority group in its home scheduler instance, otherwise it is
501 *   appended.
502 * @param queue_context The thread queue context to return an updated set of
503 *   threads for _Thread_Priority_update().  The thread queue context must be
504 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
505 *   call of this function.
506 *
507 * @see _Thread_Wait_acquire().
508 */
509void _Thread_Priority_changed(
510  Thread_Control       *the_thread,
511  Priority_Node        *priority_node,
512  bool                  prepend_it,
513  Thread_queue_Context *queue_context
514);
515
516/**
517 * @brief Changes the thread priority value of the specified thread priority
518 * node in the corresponding thread priority aggregation.
519 *
520 * The caller must be the owner of the thread wait lock.
521 *
522 * @param the_thread The thread.
523 * @param priority_node The thread priority node to change.
524 * @param new_priority The new thread priority value of the thread priority
525 *   node to change.
526 * @param prepend_it In case this is true, then the thread is prepended to
527 *   its priority group in its home scheduler instance, otherwise it is
528 *   appended.
529 * @param queue_context The thread queue context to return an updated set of
530 *   threads for _Thread_Priority_update().  The thread queue context must be
531 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
532 *   call of this function.
533 *
534 * @see _Thread_Wait_acquire().
535 */
536RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
537  Thread_Control       *the_thread,
538  Priority_Node        *priority_node,
539  Priority_Control      new_priority,
540  bool                  prepend_it,
541  Thread_queue_Context *queue_context
542)
543{
544  _Priority_Node_set_priority( priority_node, new_priority );
545  _Thread_Priority_changed(
546    the_thread,
547    priority_node,
548    prepend_it,
549    queue_context
550  );
551}
552
553/**
554 * @brief Replaces the victim priority node with the replacement priority node
555 * in the corresponding thread priority aggregation.
556 *
557 * The caller must be the owner of the thread wait lock.
558 *
559 * @param the_thread The thread.
560 * @param victim_node The victim thread priority node.
561 * @param replacement_node The replacement thread priority node.
562 *
563 * @see _Thread_Wait_acquire().
564 */
565void _Thread_Priority_replace(
566  Thread_Control *the_thread,
567  Priority_Node  *victim_node,
568  Priority_Node  *replacement_node
569);
570
571/**
572 * @brief Adds a priority node to the corresponding thread priority
573 * aggregation.
574 *
575 * The caller must be the owner of the thread wait lock.
576 *
577 * @param the_thread The thread.
578 * @param priority_node The thread priority node to add.
579 * @param queue_context The thread queue context to return an updated set of
580 *   threads for _Thread_Priority_update().  The thread queue context must be
581 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
582 *   call of this function.
583 *
584 * @see _Thread_Priority_add(), _Thread_Priority_change(),
585 *   _Thread_Priority_changed() and _Thread_Priority_remove().
586 */
587void _Thread_Priority_update( Thread_queue_Context *queue_context );
588
589#if defined(RTEMS_SMP)
590void _Thread_Priority_and_sticky_update(
591  Thread_Control *the_thread,
592  int             sticky_level_change
593);
594#endif
595
596/**
597 * @brief Returns true if the left thread priority is less than the right
598 * thread priority in the intuitive sense of priority and false otherwise.
599 */
600RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
601  Priority_Control left,
602  Priority_Control right
603)
604{
605  return left > right;
606}
607
608/**
609 * @brief Returns the highest priority of the left and right thread priorities
610 * in the intuitive sense of priority.
611 */
612RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
613  Priority_Control left,
614  Priority_Control right
615)
616{
617  return _Thread_Priority_less_than( left, right ) ? right : left;
618}
619
/**
 * @brief Returns the thread object information for the API of the object
 * identifier, or NULL in case the API is invalid.
 */
RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
  Objects_Id id
)
{
  uint32_t the_api;

  the_api = _Objects_Get_API( id );

  if ( !_Objects_Is_api_valid( the_api ) ) {
    return NULL;
  }

  /*
   * Threads are always first class :)
   *
   * There is no need to validate the object class of the object identifier,
   * since this will be done by the object get methods.
   */
  return _Objects_Information_table[ the_api ][ 1 ];
}
640
641/**
642 * @brief Gets a thread by its identifier.
643 *
644 * @see _Objects_Get().
645 */
646Thread_Control *_Thread_Get(
647  Objects_Id         id,
648  ISR_lock_Context  *lock_context
649);
650
651RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
652  const Thread_Control *thread
653)
654{
655#if defined(RTEMS_SMP)
656  return thread->Scheduler.cpu;
657#else
658  (void) thread;
659
660  return _Per_CPU_Get();
661#endif
662}
663
664RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
665  Thread_Control *thread,
666  Per_CPU_Control *cpu
667)
668{
669#if defined(RTEMS_SMP)
670  thread->Scheduler.cpu = cpu;
671#else
672  (void) thread;
673  (void) cpu;
674#endif
675}
676
677/**
678 * This function returns true if the_thread is the currently executing
679 * thread, and false otherwise.
680 */
681
682RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
683  const Thread_Control *the_thread
684)
685{
686  return ( the_thread == _Thread_Executing );
687}
688
689#if defined(RTEMS_SMP)
690/**
691 * @brief Returns @a true in case the thread executes currently on some
692 * processor in the system, otherwise @a false.
693 *
694 * Do not confuse this with _Thread_Is_executing() which checks only the
695 * current processor.
696 */
697RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
698  const Thread_Control *the_thread
699)
700{
701  return _CPU_Context_Get_is_executing( &the_thread->Registers );
702}
703#endif
704
705/**
706 * This function returns true if the_thread is the heir
707 * thread, and false otherwise.
708 */
709
710RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
711  const Thread_Control *the_thread
712)
713{
714  return ( the_thread == _Thread_Heir );
715}
716
717/**
718 * This routine clears any blocking state for the_thread.  It performs
719 * any necessary scheduling operations including the selection of
720 * a new heir thread.
721 */
722
723RTEMS_INLINE_ROUTINE void _Thread_Unblock (
724  Thread_Control *the_thread
725)
726{
727  _Thread_Clear_state( the_thread, STATES_BLOCKED );
728}
729
730/**
731 * This function returns true if the floating point context of
732 * the_thread is currently loaded in the floating point unit, and
733 * false otherwise.
734 */
735
736#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
737RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
738  const Thread_Control *the_thread
739)
740{
741  return ( the_thread == _Thread_Allocated_fp );
742}
743#endif
744
745/*
746 *  If the CPU has hardware floating point, then we must address saving
747 *  and restoring it as part of the context switch.
748 *
749 *  The second conditional compilation section selects the algorithm used
750 *  to context switch between floating point tasks.  The deferred algorithm
751 *  can be significantly better in a system with few floating point tasks
752 *  because it reduces the total number of save and restore FP context
753 *  operations.  However, this algorithm can not be used on all CPUs due
754 *  to unpredictable use of FP registers by some compilers for integer
755 *  operations.
756 */
757
758RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
759{
760#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
761#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
762  if ( executing->fp_context != NULL )
763    _Context_Save_fp( &executing->fp_context );
764#endif
765#endif
766}
767
768RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
769{
770#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
771#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
772  if ( (executing->fp_context != NULL) &&
773       !_Thread_Is_allocated_fp( executing ) ) {
774    if ( _Thread_Allocated_fp != NULL )
775      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
776    _Context_Restore_fp( &executing->fp_context );
777    _Thread_Allocated_fp = executing;
778  }
779#else
780  if ( executing->fp_context != NULL )
781    _Context_Restore_fp( &executing->fp_context );
782#endif
783#endif
784}
785
786/**
787 * This routine is invoked when the currently loaded floating
788 * point context is no longer associated with an active thread.
789 */
790
791#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
792RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
793{
794  _Thread_Allocated_fp = NULL;
795}
796#endif
797
/**
 * @brief Returns true if a thread dispatch is necessary, and false
 * otherwise.
 *
 * NOTE(review): the previous comment claimed this checks whether
 * dispatching is disabled; the code actually reads the thread dispatch
 * necessary indicator of the current processor.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
807
808/**
809 * This function returns true if the_thread is NULL and false otherwise.
810 */
811
812RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
813  const Thread_Control *the_thread
814)
815{
816  return ( the_thread == NULL );
817}
818
/**
 * @brief Returns true if the code is the thread status which indicates
 * that a proxy is blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}
830
831RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
832{
833  /* Idle threads */
834  uint32_t maximum_internal_threads =
835    rtems_configuration_get_maximum_processors();
836
837  /* MPCI thread */
838#if defined(RTEMS_MULTIPROCESSING)
839  if ( _System_state_Is_multiprocessing ) {
840    ++maximum_internal_threads;
841  }
842#endif
843
844  return maximum_internal_threads;
845}
846
847RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
848{
849  return (Thread_Control *)
850    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
851}
852
853/**
854 * @brief Gets the heir of the processor and makes it executing.
855 *
856 * Must be called with interrupts disabled.  The thread dispatch necessary
857 * indicator is cleared as a side-effect.
858 *
859 * @return The heir thread.
860 *
861 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
862 * _Thread_Dispatch_update_heir().
863 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  heir = cpu_self->heir;
  /*
   * Clear the dispatch necessary indicator and promote the heir to the
   * executing thread; per the contract above, the caller must have
   * interrupts disabled.
   */
  cpu_self->dispatch_necessary = false;
  cpu_self->executing = heir;

  return heir;
}
876
/**
 * @brief Charges the CPU time consumed since the last update to the thread.
 *
 * Samples the uptime, stores it as the new CPU usage timestamp of the
 * processor, and adds the elapsed interval to the CPU time used by the
 * thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  last = cpu->cpu_usage_timestamp;
  /* The subtraction uses the freshly sampled timestamp as the endpoint */
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}
890
891#if defined( RTEMS_SMP )
892RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
893  Per_CPU_Control *cpu_self,
894  Per_CPU_Control *cpu_for_heir,
895  Thread_Control  *heir
896)
897{
898  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );
899
900  cpu_for_heir->heir = heir;
901
902  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
903}
904#endif
905
906void _Thread_Get_CPU_time_used(
907  Thread_Control    *the_thread,
908  Timestamp_Control *cpu_time_used
909);
910
911RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
912  Thread_Action_control *action_control
913)
914{
915  _Chain_Initialize_empty( &action_control->Chain );
916}
917
918RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
919  Thread_Action *action
920)
921{
922  _Chain_Set_off_chain( &action->Node );
923}
924
/**
 * @brief Adds a post-switch action to the thread and requests a thread
 * dispatch on the processor of the thread.
 *
 * The caller must own the thread state lock (asserted below).
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;

  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  /* The append is conditional: an action already on the chain stays put */
  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
946
947RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
948  Thread_Life_state life_state
949)
950{
951  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
952}
953
954RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
955  Thread_Life_state life_state
956)
957{
958  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
959}
960
961RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
962  Thread_Life_state life_state
963)
964{
965  return ( life_state
966    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
967}
968
969RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
970  Thread_Life_state life_state
971)
972{
973  return ( life_state
974    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
975}
976
977RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
978  const Thread_Control *the_thread
979)
980{
981  _Assert( _Thread_State_is_owner( the_thread ) );
982  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
983}
984
985RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
986  Thread_Control *the_thread
987)
988{
989#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
990  ++the_thread->resource_count;
991#else
992  (void) the_thread;
993#endif
994}
995
996RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
997  Thread_Control *the_thread
998)
999{
1000#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1001  --the_thread->resource_count;
1002#else
1003  (void) the_thread;
1004#endif
1005}
1006
1007#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
1008/**
1009 * @brief Returns true if the thread owns resources, and false otherwise.
1010 *
1011 * Resources are accounted with the Thread_Control::resource_count resource
1012 * counter.  This counter is used by mutex objects for example.
1013 *
1014 * @param[in] the_thread The thread.
1015 */
1016RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
1017  const Thread_Control *the_thread
1018)
1019{
1020  return the_thread->resource_count != 0;
1021}
1022#endif
1023
#if defined(RTEMS_SMP)
/**
 * @brief Extracts the thread from the scheduler help list of the
 * processor, in case it is on that list.
 *
 * Acquires and releases the per-CPU lock of the specified processor.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  _Per_CPU_Acquire( cpu );

  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
  }

  _Per_CPU_Release( cpu );
}
#endif
1040
1041RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
1042  const Thread_Control *the_thread
1043)
1044{
1045#if defined(RTEMS_SMP)
1046  return the_thread->Scheduler.home;
1047#else
1048  (void) the_thread;
1049  return &_Scheduler_Table[ 0 ];
1050#endif
1051}
1052
1053RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
1054  const Thread_Control *the_thread
1055)
1056{
1057#if defined(RTEMS_SMP)
1058  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
1059  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
1060    _Chain_First( &the_thread->Scheduler.Wait_nodes )
1061  );
1062#else
1063  return the_thread->Scheduler.nodes;
1064#endif
1065}
1066
/**
 * @brief Returns the scheduler node of the thread for the scheduler with the
 * specified index.
 *
 * On SMP configurations the nodes are stored in one contiguous area indexed
 * by the scheduler index with a stride of _Scheduler_Node_size bytes.  On
 * uniprocessor configurations only index zero is valid.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_index The index of the scheduler instance.
 *
 * @return The scheduler node of the thread for the scheduler index.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
  const Thread_Control *the_thread,
  size_t                scheduler_index
)
{
#if defined(RTEMS_SMP)
  return (Scheduler_Node *)
    ( (uintptr_t) the_thread->Scheduler.nodes
      + scheduler_index * _Scheduler_Node_size );
#else
  _Assert( scheduler_index == 0 );
  (void) scheduler_index;
  return the_thread->Scheduler.nodes;
#endif
}
1082
1083#if defined(RTEMS_SMP)
/**
 * @brief Acquires the thread scheduler lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for the corresponding
 *   _Thread_Scheduler_release_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
}
1091
/**
 * @brief Releases the thread scheduler lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context of the corresponding
 *   _Thread_Scheduler_acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
}
1099
1100#if defined(RTEMS_SMP)
1101void _Thread_Scheduler_ask_for_help( Thread_Control *the_thread );
1102
1103void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1104#endif
1105
/**
 * @brief Records a scheduler node request for the thread.
 *
 * The request is merged with an already pending request of the node: an ADD
 * cancels a pending REMOVE and vice versa.  A node without a pending request
 * is pushed onto the thread's request list.  The thread scheduler lock
 * protects the request state.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node of the request.
 * @param[in] request The request kind (ADD or REMOVE).
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
  Thread_Control         *the_thread,
  Scheduler_Node         *scheduler_node,
  Scheduler_Node_request  request
)
{
  ISR_lock_Context       lock_context;
  Scheduler_Node_request current_request;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  current_request = scheduler_node->Thread.request;

  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
    /* First request for this node: link it into the request list */
    _Assert(
      request == SCHEDULER_NODE_REQUEST_ADD
        || request == SCHEDULER_NODE_REQUEST_REMOVE
    );
    _Assert( scheduler_node->Thread.next_request == NULL );
    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
    the_thread->Scheduler.requests = scheduler_node;
  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
    /* An opposite request is already pending: the two cancel each other */
    _Assert(
      ( current_request == SCHEDULER_NODE_REQUEST_ADD
        && request == SCHEDULER_NODE_REQUEST_REMOVE )
      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
        && request == SCHEDULER_NODE_REQUEST_ADD )
    );
    request = SCHEDULER_NODE_REQUEST_NOTHING;
  }

  scheduler_node->Thread.request = request;

  _Thread_Scheduler_release_critical( the_thread, &lock_context );
}
1141
1142RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
1143  Thread_Control *the_thread,
1144  Scheduler_Node *scheduler_node
1145)
1146{
1147  _Chain_Append_unprotected(
1148    &the_thread->Scheduler.Wait_nodes,
1149    &scheduler_node->Thread.Wait_node
1150  );
1151  _Thread_Scheduler_add_request(
1152    the_thread,
1153    scheduler_node,
1154    SCHEDULER_NODE_REQUEST_ADD
1155  );
1156}
1157
1158RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
1159  Thread_Control *the_thread,
1160  Scheduler_Node *scheduler_node
1161)
1162{
1163  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
1164  _Thread_Scheduler_add_request(
1165    the_thread,
1166    scheduler_node,
1167    SCHEDULER_NODE_REQUEST_REMOVE
1168  );
1169}
1170#endif
1171
1172/**
1173 * @brief Returns the priority of the thread.
1174 *
1175 * Returns the user API and thread wait information relevant thread priority.
1176 * This includes temporary thread priority adjustments due to locking
1177 * protocols, a job release or the POSIX sporadic server for example.
1178 *
1179 * @return The priority of the thread.
1180 */
1181RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1182  const Thread_Control *the_thread
1183)
1184{
1185  Scheduler_Node *scheduler_node;
1186
1187  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1188  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1189}
1190
1191/**
1192 * @brief Acquires the thread wait default lock inside a critical section
1193 * (interrupts disabled).
1194 *
1195 * @param[in] the_thread The thread.
1196 * @param[in] lock_context The lock context used for the corresponding lock
1197 *   release.
1198 *
1199 * @see _Thread_Wait_release_default_critical().
1200 */
1201RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
1202  Thread_Control   *the_thread,
1203  ISR_lock_Context *lock_context
1204)
1205{
1206  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
1207}
1208
1209/**
1210 * @brief Acquires the thread wait default lock and returns the executing
1211 * thread.
1212 *
1213 * @param[in] lock_context The lock context used for the corresponding lock
1214 *   release.
1215 *
1216 * @return The executing thread.
1217 *
1218 * @see _Thread_Wait_release_default().
1219 */
1220RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
1221  ISR_lock_Context *lock_context
1222)
1223{
1224  Thread_Control *executing;
1225
1226  _ISR_lock_ISR_disable( lock_context );
1227  executing = _Thread_Executing;
1228  _Thread_Wait_acquire_default_critical( executing, lock_context );
1229
1230  return executing;
1231}
1232
1233/**
1234 * @brief Acquires the thread wait default lock and disables interrupts.
1235 *
1236 * @param[in] the_thread The thread.
1237 * @param[in] lock_context The lock context used for the corresponding lock
1238 *   release.
1239 *
1240 * @see _Thread_Wait_release_default().
1241 */
1242RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
1243  Thread_Control   *the_thread,
1244  ISR_lock_Context *lock_context
1245)
1246{
1247  _ISR_lock_ISR_disable( lock_context );
1248  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
1249}
1250
1251/**
1252 * @brief Releases the thread wait default lock inside a critical section
1253 * (interrupts disabled).
1254 *
1255 * The previous interrupt status is not restored.
1256 *
1257 * @param[in] the_thread The thread.
1258 * @param[in] lock_context The lock context used for the corresponding lock
1259 *   acquire.
1260 */
1261RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
1262  Thread_Control   *the_thread,
1263  ISR_lock_Context *lock_context
1264)
1265{
1266  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1267}
1268
1269/**
1270 * @brief Releases the thread wait default lock and restores the previous
1271 * interrupt status.
1272 *
1273 * @param[in] the_thread The thread.
1274 * @param[in] lock_context The lock context used for the corresponding lock
1275 *   acquire.
1276 */
1277RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
1278  Thread_Control   *the_thread,
1279  ISR_lock_Context *lock_context
1280)
1281{
1282  _Thread_Wait_release_default_critical( the_thread, lock_context );
1283  _ISR_lock_ISR_enable( lock_context );
1284}
1285
1286#if defined(RTEMS_SMP)
1287#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1288  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1289
1290RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1291  Thread_Control            *the_thread,
1292  Thread_queue_Lock_context *queue_lock_context
1293)
1294{
1295  Chain_Node *first;
1296
1297  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1298  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1299
1300  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1301    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1302  }
1303}
1304
/**
 * @brief Acquires the thread queue lock inside a critical section
 * (interrupts disabled).
 *
 * The lock statistics are accounted to the executing thread.
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The lock context for the corresponding
 *   _Thread_Wait_release_queue_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_acquire_critical(
    queue,
    &_Thread_Executing->Potpourri_stats,
    &queue_lock_context->Lock_context
  );
}
1316
/**
 * @brief Releases the thread queue lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The lock context of the corresponding
 *   _Thread_Wait_acquire_queue_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_release_critical(
    queue,
    &queue_lock_context->Lock_context
  );
}
1327#endif
1328
1329/**
1330 * @brief Acquires the thread wait lock inside a critical section (interrupts
1331 * disabled).
1332 *
1333 * @param[in] the_thread The thread.
1334 * @param[in] queue_context The thread queue context for the corresponding
1335 *   _Thread_Wait_release_critical().
1336 */
1337RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
1338  Thread_Control       *the_thread,
1339  Thread_queue_Context *queue_context
1340)
1341{
1342#if defined(RTEMS_SMP)
1343  Thread_queue_Queue *queue;
1344
1345  _Thread_Wait_acquire_default_critical(
1346    the_thread,
1347    &queue_context->Lock_context.Lock_context
1348  );
1349
1350  queue = the_thread->Wait.queue;
1351  queue_context->Lock_context.Wait.queue = queue;
1352
1353  if ( queue != NULL ) {
1354    _Thread_queue_Gate_add(
1355      &the_thread->Wait.Lock.Pending_requests,
1356      &queue_context->Lock_context.Wait.Gate
1357    );
1358    _Thread_Wait_release_default_critical(
1359      the_thread,
1360      &queue_context->Lock_context.Lock_context
1361    );
1362    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1363
1364    if ( queue_context->Lock_context.Wait.queue == NULL ) {
1365      _Thread_Wait_release_queue_critical(
1366        queue,
1367        &queue_context->Lock_context
1368      );
1369      _Thread_Wait_acquire_default_critical(
1370        the_thread,
1371        &queue_context->Lock_context.Lock_context
1372      );
1373      _Thread_Wait_remove_request_locked(
1374        the_thread,
1375        &queue_context->Lock_context
1376      );
1377      _Assert( the_thread->Wait.queue == NULL );
1378    }
1379  }
1380#else
1381  (void) the_thread;
1382  (void) queue_context;
1383#endif
1384}
1385
1386/**
1387 * @brief Acquires the thread wait default lock and disables interrupts.
1388 *
1389 * @param[in] the_thread The thread.
1390 * @param[in] queue_context The thread queue context for the corresponding
1391 *   _Thread_Wait_release().
1392 */
1393RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
1394  Thread_Control       *the_thread,
1395  Thread_queue_Context *queue_context
1396)
1397{
1398  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
1399  _Thread_Wait_acquire_critical( the_thread, queue_context );
1400}
1401
1402/**
1403 * @brief Releases the thread wait lock inside a critical section (interrupts
1404 * disabled).
1405 *
1406 * The previous interrupt status is not restored.
1407 *
1408 * @param[in] the_thread The thread.
1409 * @param[in] queue_context The thread queue context used for corresponding
1410 *   _Thread_Wait_acquire_critical().
1411 */
1412RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
1413  Thread_Control       *the_thread,
1414  Thread_queue_Context *queue_context
1415)
1416{
1417#if defined(RTEMS_SMP)
1418  Thread_queue_Queue *queue;
1419
1420  queue = queue_context->Lock_context.Wait.queue;
1421
1422  if ( queue != NULL ) {
1423    _Thread_Wait_release_queue_critical(
1424      queue, &queue_context->Lock_context
1425    );
1426    _Thread_Wait_acquire_default_critical(
1427      the_thread,
1428      &queue_context->Lock_context.Lock_context
1429    );
1430    _Thread_Wait_remove_request_locked(
1431      the_thread,
1432      &queue_context->Lock_context
1433    );
1434  }
1435
1436  _Thread_Wait_release_default_critical(
1437    the_thread,
1438    &queue_context->Lock_context.Lock_context
1439  );
1440#else
1441  (void) the_thread;
1442  (void) queue_context;
1443#endif
1444}
1445
1446/**
1447 * @brief Releases the thread wait lock and restores the previous interrupt
1448 * status.
1449 *
1450 * @param[in] the_thread The thread.
1451 * @param[in] queue_context The thread queue context used for corresponding
1452 *   _Thread_Wait_acquire().
1453 */
1454RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
1455  Thread_Control       *the_thread,
1456  Thread_queue_Context *queue_context
1457)
1458{
1459  _Thread_Wait_release_critical( the_thread, queue_context );
1460  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
1461}
1462
1463/**
1464 * @brief Claims the thread wait queue.
1465 *
1466 * The caller must not be the owner of the default thread wait lock.  The
1467 * caller must be the owner of the corresponding thread queue lock.  The
1468 * registration of the corresponding thread queue operations is deferred and
1469 * done after the deadlock detection.  This is crucial to support timeouts on
1470 * SMP configurations.
1471 *
1472 * @param[in] the_thread The thread.
1473 * @param[in] queue The new thread queue.
1474 *
1475 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
1476 */
1477RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
1478  Thread_Control     *the_thread,
1479  Thread_queue_Queue *queue
1480)
1481{
1482  ISR_lock_Context lock_context;
1483
1484  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1485
1486  _Assert( the_thread->Wait.queue == NULL );
1487
1488#if defined(RTEMS_SMP)
1489  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
1490  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
1491  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
1492#endif
1493
1494  the_thread->Wait.queue = queue;
1495
1496  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1497}
1498
1499/**
1500 * @brief Finalizes the thread wait queue claim via registration of the
1501 * corresponding thread queue operations.
1502 *
1503 * @param[in] the_thread The thread.
1504 * @param[in] operations The corresponding thread queue operations.
1505 */
1506RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
1507  Thread_Control                *the_thread,
1508  const Thread_queue_Operations *operations
1509)
1510{
1511  the_thread->Wait.operations = operations;
1512}
1513
1514/**
1515 * @brief Removes a thread wait lock request.
1516 *
1517 * On SMP configurations, removes a thread wait lock request.
1518 *
1519 * On other configurations, this function does nothing.
1520 *
1521 * @param[in] the_thread The thread.
1522 * @param[in] queue_lock_context The thread queue lock context used for
1523 *   corresponding _Thread_Wait_acquire().
1524 */
1525RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
1526  Thread_Control            *the_thread,
1527  Thread_queue_Lock_context *queue_lock_context
1528)
1529{
1530#if defined(RTEMS_SMP)
1531  ISR_lock_Context lock_context;
1532
1533  _Thread_Wait_acquire_default( the_thread, &lock_context );
1534  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
1535  _Thread_Wait_release_default( the_thread, &lock_context );
1536#else
1537  (void) the_thread;
1538  (void) queue_lock_context;
1539#endif
1540}
1541
1542/**
1543 * @brief Restores the default thread wait queue and operations.
1544 *
1545 * The caller must be the owner of the current thread wait queue lock.
1546 *
1547 * On SMP configurations, the pending requests are updated to use the stale
1548 * thread queue operations.
1549 *
1550 * @param[in] the_thread The thread.
1551 *
1552 * @see _Thread_Wait_claim().
1553 */
1554RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
1555  Thread_Control *the_thread
1556)
1557{
1558#if defined(RTEMS_SMP)
1559  ISR_lock_Context  lock_context;
1560  Chain_Node       *node;
1561  const Chain_Node *tail;
1562
1563  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1564
1565  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1566  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
1567
1568  if ( node != tail ) {
1569    do {
1570      Thread_queue_Context *queue_context;
1571
1572      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
1573      queue_context->Lock_context.Wait.queue = NULL;
1574
1575      node = _Chain_Next( node );
1576    } while ( node != tail );
1577
1578    _Thread_queue_Gate_add(
1579      &the_thread->Wait.Lock.Pending_requests,
1580      &the_thread->Wait.Lock.Tranquilizer
1581    );
1582  } else {
1583    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
1584  }
1585#endif
1586
1587  the_thread->Wait.queue = NULL;
1588  the_thread->Wait.operations = &_Thread_queue_Operations_default;
1589
1590#if defined(RTEMS_SMP)
1591  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1592#endif
1593}
1594
1595/**
1596 * @brief Tranquilizes the thread after a wait on a thread queue.
1597 *
1598 * After the violent blocking procedure this function makes the thread calm and
1599 * peaceful again so that it can carry out its normal work.
1600 *
1601 * On SMP configurations, ensures that all pending thread wait lock requests
1602 * completed before the thread is able to begin a new thread wait procedure.
1603 *
1604 * On other configurations, this function does nothing.
1605 *
1606 * It must be called after a _Thread_Wait_claim() exactly once
1607 *  - after the corresponding thread queue lock was released, and
1608 *  - the default wait state is restored or some other processor is about to do
1609 *    this.
1610 *
1611 * @param[in] the_thread The thread.
1612 */
1613RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
1614  Thread_Control *the_thread
1615)
1616{
1617#if defined(RTEMS_SMP)
1618  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
1619#else
1620  (void) the_thread;
1621#endif
1622}
1623
1624/**
1625 * @brief Cancels a thread wait on a thread queue.
1626 *
1627 * @param[in] the_thread The thread.
1628 * @param[in] queue_context The thread queue context used for corresponding
1629 *   _Thread_Wait_acquire().
1630 */
1631RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
1632  Thread_Control       *the_thread,
1633  Thread_queue_Context *queue_context
1634)
1635{
1636  Thread_queue_Queue *queue;
1637
1638  queue = the_thread->Wait.queue;
1639
1640#if defined(RTEMS_SMP)
1641  if ( queue != NULL ) {
1642    _Assert( queue_context->Lock_context.Wait.queue == queue );
1643#endif
1644
1645    ( *the_thread->Wait.operations->extract )(
1646      queue,
1647      the_thread,
1648      queue_context
1649    );
1650    _Thread_Wait_restore_default( the_thread );
1651
1652#if defined(RTEMS_SMP)
1653    _Assert( queue_context->Lock_context.Wait.queue == NULL );
1654    queue_context->Lock_context.Wait.queue = queue;
1655  }
1656#endif
1657}
1658
1659/**
1660 * @brief The initial thread wait flags value set by _Thread_Initialize().
1661 */
1662#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1663
1664/**
1665 * @brief Mask to get the thread wait state flags.
1666 */
1667#define THREAD_WAIT_STATE_MASK 0xffU
1668
1669/**
1670 * @brief Indicates that the thread begins with the blocking operation.
1671 *
1672 * A blocking operation consists of an optional watchdog initialization and the
1673 * setting of the appropriate thread blocking state with the corresponding
1674 * scheduler block operation.
1675 */
1676#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1677
1678/**
1679 * @brief Indicates that the thread completed the blocking operation.
1680 */
1681#define THREAD_WAIT_STATE_BLOCKED 0x2U
1682
1683/**
1684 * @brief Indicates that a condition to end the thread wait occurred.
1685 *
1686 * This could be a timeout, a signal, an event or a resource availability.
1687 */
1688#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1689
1690/**
1691 * @brief Mask to get the thread wait class flags.
1692 */
1693#define THREAD_WAIT_CLASS_MASK 0xff00U
1694
1695/**
1696 * @brief Indicates that the thread waits for an event.
1697 */
1698#define THREAD_WAIT_CLASS_EVENT 0x100U
1699
1700/**
1701 * @brief Indicates that the thread waits for a system event.
1702 */
1703#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1704
1705/**
1706 * @brief Indicates that the thread waits for an object.
1707 */
1708#define THREAD_WAIT_CLASS_OBJECT 0x400U
1709
1710/**
1711 * @brief Indicates that the thread waits for a period.
1712 */
1713#define THREAD_WAIT_CLASS_PERIOD 0x800U
1714
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations this is a relaxed atomic store.
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The new thread wait flags.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
1726
/**
 * @brief Returns the thread wait flags.
 *
 * On SMP configurations this is a relaxed atomic load.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
1737
/**
 * @brief Returns the thread wait flags with acquire semantics.
 *
 * On SMP configurations this is an atomic load with acquire memory order.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
#else
  return the_thread->Wait.flags;
#endif
}
1748
1749/**
1750 * @brief Tries to change the thread wait flags with release semantics in case
1751 * of success.
1752 *
1753 * Must be called inside a critical section (interrupts disabled).
1754 *
1755 * In case the wait flags are equal to the expected wait flags, then the wait
1756 * flags are set to the desired wait flags.
1757 *
1758 * @param[in] the_thread The thread.
1759 * @param[in] expected_flags The expected wait flags.
1760 * @param[in] desired_flags The desired wait flags.
1761 *
1762 * @retval true The wait flags were equal to the expected wait flags.
1763 * @retval false Otherwise.
1764 */
1765RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
1766  Thread_Control    *the_thread,
1767  Thread_Wait_flags  expected_flags,
1768  Thread_Wait_flags  desired_flags
1769)
1770{
1771  _Assert( _ISR_Get_level() != 0 );
1772
1773#if defined(RTEMS_SMP)
1774  return _Atomic_Compare_exchange_uint(
1775    &the_thread->Wait.flags,
1776    &expected_flags,
1777    desired_flags,
1778    ATOMIC_ORDER_RELEASE,
1779    ATOMIC_ORDER_RELAXED
1780  );
1781#else
1782  bool success = ( the_thread->Wait.flags == expected_flags );
1783
1784  if ( success ) {
1785    the_thread->Wait.flags = desired_flags;
1786  }
1787
1788  return success;
1789#endif
1790}
1791
1792/**
1793 * @brief Tries to change the thread wait flags with acquire semantics.
1794 *
1795 * In case the wait flags are equal to the expected wait flags, then the wait
1796 * flags are set to the desired wait flags.
1797 *
1798 * @param[in] the_thread The thread.
1799 * @param[in] expected_flags The expected wait flags.
1800 * @param[in] desired_flags The desired wait flags.
1801 *
1802 * @retval true The wait flags were equal to the expected wait flags.
1803 * @retval false Otherwise.
1804 */
1805RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
1806  Thread_Control    *the_thread,
1807  Thread_Wait_flags  expected_flags,
1808  Thread_Wait_flags  desired_flags
1809)
1810{
1811  bool success;
1812#if defined(RTEMS_SMP)
1813  return _Atomic_Compare_exchange_uint(
1814    &the_thread->Wait.flags,
1815    &expected_flags,
1816    desired_flags,
1817    ATOMIC_ORDER_ACQUIRE,
1818    ATOMIC_ORDER_ACQUIRE
1819  );
1820#else
1821  ISR_Level level;
1822
1823  _ISR_Local_disable( level );
1824
1825  success = _Thread_Wait_flags_try_change_release(
1826    the_thread,
1827    expected_flags,
1828    desired_flags
1829  );
1830
1831  _ISR_Local_enable( level );
1832#endif
1833
1834  return success;
1835}
1836
1837/**
1838 * @brief Returns the object identifier of the object containing the current
1839 * thread wait queue.
1840 *
1841 * This function may be used for debug and system information purposes.  The
1842 * caller must be the owner of the thread lock.
1843 *
1844 * @retval 0 The thread waits on no thread queue currently, the thread wait
1845 *   queue is not contained in an object, or the current thread state provides
1846 *   insufficient information, e.g. the thread is in the middle of a blocking
1847 *   operation.
1848 * @retval other The object identifier of the object containing the thread wait
1849 *   queue.
1850 */
1851Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1852
/**
 * @brief Returns the thread wait return code as a status code.
 *
 * @param[in] the_thread The thread.
 *
 * @return The status stored in Thread_Wait_information::return_code.
 */
RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
  const Thread_Control *the_thread
)
{
  return (Status_Control) the_thread->Wait.return_code;
}
1859
1860/**
1861 * @brief General purpose thread wait timeout.
1862 *
1863 * @param[in] watchdog The thread timer watchdog.
1864 */
1865void _Thread_Timeout( Watchdog_Control *watchdog );
1866
/**
 * @brief Initializes the thread timer information.
 *
 * The watchdog header defaults to the relative watchdog header of the
 * processor.
 *
 * @param[in] timer The thread timer information to initialize.
 * @param[in] cpu The processor used for the watchdog pre-initialization.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
1876
/**
 * @brief Inserts the thread timer into the relative watchdog header of the
 * processor.
 *
 * The thread timer lock protects the update of the timer header, routine and
 * watchdog insertion.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine to call on expiry.
 * @param[in] ticks The watchdog interval in clock ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  Watchdog_Interval               ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1895
/**
 * @brief Inserts the thread timer into the absolute watchdog header of the
 * processor.
 *
 * The thread timer lock protects the update of the timer header, routine and
 * watchdog insertion.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine to call on expiry.
 * @param[in] expire The absolute expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1914
/**
 * @brief Removes the thread timer watchdog from its current header.
 *
 * The thread timer lock protects the removal.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    /* The watchdog remembers the processor it was inserted on */
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1933
/**
 * @brief Tranquilizes the thread, removes its timer and unblocks it.
 *
 * On multiprocessing configurations a proxy thread (non-local object
 * identifier) is unblocked via the thread queue proxy unblock, otherwise the
 * thread is unblocked directly.
 *
 * @param[in] the_thread The thread to unblock.
 * @param[in] queue The thread queue; only used for proxy unblock on
 *   multiprocessing configurations.
 */
RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
1953
1954size_t _Thread_Get_name(
1955  const Thread_Control *the_thread,
1956  char                 *buffer,
1957  size_t                buffer_size
1958);
1959
1960/** @}*/
1961
1962#ifdef __cplusplus
1963}
1964#endif
1965
1966#if defined(RTEMS_MULTIPROCESSING)
1967#include <rtems/score/threadmp.h>
1968#endif
1969
1970#endif
1971/* end of include file */
Note: See TracBrowser for help on using the repository browser.