source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 7f742432

5
Last change on this file since 7f742432 was 7f742432, checked in by Sebastian Huber <sebastian.huber@…>, on 10/31/16 at 07:22:02

score: Delete Thread_Scheduler_control::own_node

Update #2556.

  • Property mode set to 100644
File size: 50.6 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2016 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/threadqimpl.h>
35#include <rtems/score/todimpl.h>
36#include <rtems/score/freechain.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44/**
45 * @addtogroup ScoreThread
46 */
47/**@{**/
48
49/**
 *  The following status value is used to indicate that a proxy is
 *  blocking while it is waiting for a resource.
52 */
53#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
54
55/**
56 *  Self for the GNU Ada Run-Time
57 */
58extern void *rtems_ada_self;
59
/**
 * @brief Information for a class of threads.
 *
 * Bundles the object management information with the free chain of thread
 * queue heads used by the threads of this class.
 */
typedef struct {
  /* Object management information (allocation, local table) for this class. */
  Objects_Information Objects;

  /* Free chain of thread queue heads available to threads of this class. */
  Freechain_Control Free_thread_queue_heads;
} Thread_Information;
65
66/**
67 *  The following defines the information control block used to
68 *  manage this class of objects.
69 */
70extern Thread_Information _Thread_Internal_information;
71
72/**
73 *  The following points to the thread whose floating point
74 *  context is currently loaded.
75 */
76#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
77extern Thread_Control *_Thread_Allocated_fp;
78#endif
79
#if defined(RTEMS_SMP)
/* Maps a scheduler help node (chain node) back to its containing thread. */
#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
#endif
84
85typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
86
87void _Thread_Iterate(
88  Thread_Visitor  visitor,
89  void           *arg
90);
91
92void _Thread_Initialize_information(
93  Thread_Information  *information,
94  Objects_APIs         the_api,
95  uint16_t             the_class,
96  uint32_t             maximum,
97  bool                 is_string,
98  uint32_t             maximum_name_length
99);
100
101/**
102 *  @brief Initialize thread handler.
103 *
104 *  This routine performs the initialization necessary for this handler.
105 */
106void _Thread_Handler_initialization(void);
107
108/**
109 *  @brief Create idle thread.
110 *
111 *  This routine creates the idle thread.
112 *
113 *  @warning No thread should be created before this one.
114 */
115void _Thread_Create_idle(void);
116
117/**
118 *  @brief Start thread multitasking.
119 *
120 *  This routine initiates multitasking.  It is invoked only as
121 *  part of initialization and its invocation is the last act of
122 *  the non-multitasking part of the system initialization.
123 */
124void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
125
126/**
127 *  @brief Allocate the requested stack space for the thread.
128 *
129 *  Allocate the requested stack space for the thread.
130 *  Set the Start.stack field to the address of the stack.
131 *
132 *  @param[in] the_thread is the thread where the stack space is requested
133 *  @param[in] stack_size is the stack space is requested
134 *
135 *  @retval actual size allocated after any adjustment
136 *  @retval zero if the allocation failed
137 */
138size_t _Thread_Stack_Allocate(
139  Thread_Control *the_thread,
140  size_t          stack_size
141);
142
143/**
144 *  @brief Deallocate thread stack.
145 *
146 *  Deallocate the Thread's stack.
147 */
148void _Thread_Stack_Free(
149  Thread_Control *the_thread
150);
151
152/**
153 *  @brief Initialize thread.
154 *
155 *  This routine initializes the specified the thread.  It allocates
156 *  all memory associated with this thread.  It completes by adding
157 *  the thread to the local object table so operations on this
158 *  thread id are allowed.
159 *
160 *  @note If stack_area is NULL, it is allocated from the workspace.
161 *
162 *  @note If the stack is allocated from the workspace, then it is
163 *        guaranteed to be of at least minimum size.
164 */
165bool _Thread_Initialize(
166  Thread_Information                   *information,
167  Thread_Control                       *the_thread,
168  const struct Scheduler_Control       *scheduler,
169  void                                 *stack_area,
170  size_t                                stack_size,
171  bool                                  is_fp,
172  Priority_Control                      priority,
173  bool                                  is_preemptible,
174  Thread_CPU_budget_algorithms          budget_algorithm,
175  Thread_CPU_budget_algorithm_callout   budget_callout,
176  uint32_t                              isr_level,
177  Objects_Name                          name
178);
179
180/**
181 *  @brief Initializes thread and executes it.
182 *
183 *  This routine initializes the executable information for a thread
184 *  and makes it ready to execute.  After this routine executes, the
185 *  thread competes with all other threads for CPU time.
186 *
187 *  @param the_thread The thread to be started.
188 *  @param entry The thread entry information.
189 */
190bool _Thread_Start(
191  Thread_Control                 *the_thread,
192  const Thread_Entry_information *entry,
193  ISR_lock_Context               *lock_context
194);
195
196void _Thread_Restart_self(
197  Thread_Control                 *executing,
198  const Thread_Entry_information *entry,
199  ISR_lock_Context               *lock_context
200) RTEMS_NO_RETURN;
201
202bool _Thread_Restart_other(
203  Thread_Control                 *the_thread,
204  const Thread_Entry_information *entry,
205  ISR_lock_Context               *lock_context
206);
207
208void _Thread_Yield( Thread_Control *executing );
209
210Thread_Life_state _Thread_Change_life(
211  Thread_Life_state clear,
212  Thread_Life_state set,
213  Thread_Life_state ignore
214);
215
216Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
217
218/**
219 * @brief Kills all zombie threads in the system.
220 *
221 * Threads change into the zombie state as the last step in the thread
222 * termination sequence right before a context switch to the heir thread is
223 * initiated.  Since the thread stack is still in use during this phase we have
224 * to postpone the thread stack reclamation until this point.  On SMP
225 * configurations we may have to busy wait for context switch completion here.
226 */
227void _Thread_Kill_zombies( void );
228
229void _Thread_Exit(
230  Thread_Control    *executing,
231  Thread_Life_state  set,
232  void              *exit_value
233);
234
235void _Thread_Join(
236  Thread_Control       *the_thread,
237  States_Control        waiting_for_join,
238  Thread_Control       *executing,
239  Thread_queue_Context *queue_context
240);
241
242void _Thread_Cancel(
243  Thread_Control *the_thread,
244  Thread_Control *executing,
245  void           *exit_value
246);
247
248/**
249 * @brief Closes the thread.
250 *
251 * Closes the thread object and starts the thread termination sequence.  In
252 * case the executing thread is not terminated, then this function waits until
253 * the terminating thread reached the zombie state.
254 */
255void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
256
257RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
258{
259  return _States_Is_ready( the_thread->current_state );
260}
261
262States_Control _Thread_Clear_state_locked(
263  Thread_Control *the_thread,
264  States_Control  state
265);
266
267/**
268 * @brief Clears the specified thread state.
269 *
270 * In case the previous state is a non-ready state and the next state is the
271 * ready state, then the thread is unblocked by the scheduler.
272 *
273 * @param[in] the_thread The thread.
274 * @param[in] state The state to clear.  It must not be zero.
275 *
276 * @return The previous state.
277 */
278States_Control _Thread_Clear_state(
279  Thread_Control *the_thread,
280  States_Control  state
281);
282
283States_Control _Thread_Set_state_locked(
284  Thread_Control *the_thread,
285  States_Control  state
286);
287
288/**
289 * @brief Sets the specified thread state.
290 *
291 * In case the previous state is the ready state, then the thread is blocked by
292 * the scheduler.
293 *
294 * @param[in] the_thread The thread.
295 * @param[in] state The state to set.  It must not be zero.
296 *
297 * @return The previous state.
298 */
299States_Control _Thread_Set_state(
300  Thread_Control *the_thread,
301  States_Control  state
302);
303
304/**
 *  @brief Initializes the environment for a thread.
306 *
307 *  This routine initializes the context of @a the_thread to its
308 *  appropriate starting state.
309 *
310 *  @param[in] the_thread is the pointer to the thread control block.
311 */
312void _Thread_Load_environment(
313  Thread_Control *the_thread
314);
315
316void _Thread_Entry_adaptor_idle( Thread_Control *executing );
317
318void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
319
320void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
321
322/**
323 *  @brief Wrapper function for all threads.
324 *
325 *  This routine is the wrapper function for all threads.  It is
326 *  the starting point for all threads.  The user provided thread
327 *  entry point is invoked by this routine.  Operations
328 *  which must be performed immediately before and after the user's
329 *  thread executes are found here.
330 *
331 *  @note On entry, it is assumed all interrupts are blocked and that this
332 *  routine needs to set the initial isr level.  This may or may not
333 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
335 *  setting the initial isr level properly here is safe.
336 */
337void _Thread_Handler( void );
338
339/**
340 * @brief Executes the global constructors and then restarts itself as the
341 * first initialization thread.
342 *
343 * The first initialization thread is the first RTEMS initialization task or
344 * the first POSIX initialization thread in case no RTEMS initialization tasks
345 * are present.
346 */
347void _Thread_Global_construction(
348  Thread_Control                 *executing,
349  const Thread_Entry_information *entry
350) RTEMS_NO_RETURN;
351
352/**
 *  @brief Ends the delay of a thread.
354 *
355 *  This routine is invoked when a thread must be unblocked at the
356 *  end of a time based delay (i.e. wake after or wake when).
357 *  It is called by the watchdog handler.
358 *
359 *  @param[in] id is the thread id
360 *  @param[in] ignored is not used
361 */
362void _Thread_Delay_ended(
363  Objects_Id  id,
364  void       *ignored
365);
366
/**
 * @brief Acquires the thread state lock with interrupts already disabled.
 *
 * The thread state is protected by the lock of the thread join queue.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
}
374
/**
 * @brief Disables interrupts and acquires the thread state lock.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Interrupts must be disabled before the state lock is obtained. */
  _ISR_lock_ISR_disable( lock_context );
  _Thread_State_acquire_critical( the_thread, lock_context );
}
383
/**
 * @brief Disables interrupts, acquires the state lock of the executing
 * thread, and returns the executing thread.
 *
 * @return The executing thread of the current processor.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  /* Interrupts are disabled before the executing thread is read. */
  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_State_acquire_critical( executing, lock_context );

  return executing;
}
396
/**
 * @brief Releases the thread state lock; interrupts remain disabled.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
}
404
/**
 * @brief Releases the thread state lock and enables interrupts.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Release the lock first, then restore the interrupt state. */
  _Thread_State_release_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
413
#if defined(RTEMS_DEBUG)
/**
 * @brief Returns true if the executing context owns the state lock of the
 * specified thread, and false otherwise.  Available only in debug builds.
 */
RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
  const Thread_Control *the_thread
)
{
  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
}
#endif
422
423/**
424 * @brief Performs the priority actions specified by the thread queue context
425 * along the thread queue path.
426 *
427 * The caller must be the owner of the thread wait lock.
428 *
429 * @param start_of_path The start thread of the thread queue path.
430 * @param queue_context The thread queue context specifying the thread queue
431 *   path and initial thread priority actions.
432 *
433 * @see _Thread_queue_Path_acquire_critical().
434 */
435void _Thread_Priority_perform_actions(
436  Thread_Control       *start_of_path,
437  Thread_queue_Context *queue_context
438);
439
440/**
441 * @brief Adds the specified thread priority node to the corresponding thread
442 * priority aggregation.
443 *
444 * The caller must be the owner of the thread wait lock.
445 *
446 * @param the_thread The thread.
447 * @param priority_node The thread priority node to add.
448 * @param queue_context The thread queue context to return an updated set of
449 *   threads for _Thread_Priority_update().  The thread queue context must be
450 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
451 *   call of this function.
452 *
453 * @see _Thread_Wait_acquire().
454 */
455void _Thread_Priority_add(
456  Thread_Control       *the_thread,
457  Priority_Node        *priority_node,
458  Thread_queue_Context *queue_context
459);
460
461/**
462 * @brief Removes the specified thread priority node from the corresponding
463 * thread priority aggregation.
464 *
465 * The caller must be the owner of the thread wait lock.
466 *
467 * @param the_thread The thread.
468 * @param priority_node The thread priority node to remove.
469 * @param queue_context The thread queue context to return an updated set of
470 *   threads for _Thread_Priority_update().  The thread queue context must be
471 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
472 *   call of this function.
473 *
474 * @see _Thread_Wait_acquire().
475 */
476void _Thread_Priority_remove(
477  Thread_Control       *the_thread,
478  Priority_Node        *priority_node,
479  Thread_queue_Context *queue_context
480);
481
482/**
483 * @brief Propagates a thread priority value change in the specified thread
484 * priority node to the corresponding thread priority aggregation.
485 *
486 * The caller must be the owner of the thread wait lock.
487 *
488 * @param the_thread The thread.
489 * @param priority_node The thread priority node to change.
490 * @param prepend_it In case this is true, then the thread is prepended to
491 *   its priority group in its home scheduler instance, otherwise it is
492 *   appended.
493 * @param queue_context The thread queue context to return an updated set of
494 *   threads for _Thread_Priority_update().  The thread queue context must be
495 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
496 *   call of this function.
497 *
498 * @see _Thread_Wait_acquire().
499 */
500void _Thread_Priority_changed(
501  Thread_Control       *the_thread,
502  Priority_Node        *priority_node,
503  bool                  prepend_it,
504  Thread_queue_Context *queue_context
505);
506
507/**
508 * @brief Changes the thread priority value of the specified thread priority
509 * node in the corresponding thread priority aggregation.
510 *
511 * The caller must be the owner of the thread wait lock.
512 *
513 * @param the_thread The thread.
514 * @param priority_node The thread priority node to change.
515 * @param new_priority The new thread priority value of the thread priority
516 *   node to change.
517 * @param prepend_it In case this is true, then the thread is prepended to
518 *   its priority group in its home scheduler instance, otherwise it is
519 *   appended.
520 * @param queue_context The thread queue context to return an updated set of
521 *   threads for _Thread_Priority_update().  The thread queue context must be
522 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
523 *   call of this function.
524 *
525 * @see _Thread_Wait_acquire().
526 */
527RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
528  Thread_Control       *the_thread,
529  Priority_Node        *priority_node,
530  Priority_Control      new_priority,
531  bool                  prepend_it,
532  Thread_queue_Context *queue_context
533)
534{
535  _Priority_Node_set_priority( priority_node, new_priority );
536  _Thread_Priority_changed(
537    the_thread,
538    priority_node,
539    prepend_it,
540    queue_context
541  );
542}
543
544/**
545 * @brief Replaces the victim priority node with the replacement priority node
546 * in the corresponding thread priority aggregation.
547 *
548 * The caller must be the owner of the thread wait lock.
549 *
550 * @param the_thread The thread.
551 * @param victim_node The victim thread priority node.
552 * @param replacement_node The replacement thread priority node.
553 *
554 * @see _Thread_Wait_acquire().
555 */
556void _Thread_Priority_replace(
557  Thread_Control *the_thread,
558  Priority_Node  *victim_node,
559  Priority_Node  *replacement_node
560);
561
562/**
563 * @brief Adds a priority node to the corresponding thread priority
564 * aggregation.
565 *
566 * The caller must be the owner of the thread wait lock.
567 *
568 * @param the_thread The thread.
569 * @param priority_node The thread priority node to add.
570 * @param queue_context The thread queue context to return an updated set of
571 *   threads for _Thread_Priority_update().  The thread queue context must be
572 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
573 *   call of this function.
574 *
575 * @see _Thread_Priority_add(), _Thread_Priority_change(),
576 *   _Thread_Priority_changed() and _Thread_Priority_remove().
577 */
578void _Thread_Priority_update( Thread_queue_Context *queue_context );
579
580#if defined(RTEMS_SMP)
581void _Thread_Priority_and_sticky_update(
582  Thread_Control *the_thread,
583  int             sticky_level_change
584);
585#endif
586
587/**
588 * @brief Returns true if the left thread priority is less than the right
589 * thread priority in the intuitive sense of priority and false otherwise.
590 */
591RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
592  Priority_Control left,
593  Priority_Control right
594)
595{
596  return left > right;
597}
598
599/**
600 * @brief Returns the highest priority of the left and right thread priorities
601 * in the intuitive sense of priority.
602 */
603RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
604  Priority_Control left,
605  Priority_Control right
606)
607{
608  return _Thread_Priority_less_than( left, right ) ? right : left;
609}
610
611RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
612  Objects_Id id
613)
614{
615  uint32_t the_api;
616
617  the_api = _Objects_Get_API( id );
618
619  if ( !_Objects_Is_api_valid( the_api ) ) {
620    return NULL;
621  }
622
623  /*
624   * Threads are always first class :)
625   *
626   * There is no need to validate the object class of the object identifier,
627   * since this will be done by the object get methods.
628   */
629  return _Objects_Information_table[ the_api ][ 1 ];
630}
631
632/**
633 * @brief Gets a thread by its identifier.
634 *
635 * @see _Objects_Get().
636 */
637Thread_Control *_Thread_Get(
638  Objects_Id         id,
639  ISR_lock_Context  *lock_context
640);
641
/**
 * @brief Returns the processor of the thread.
 *
 * On uni-processor configurations this is always the current processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}
654
/**
 * @brief Sets the processor of the thread.
 *
 * This is a no-operation on uni-processor configurations.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
667
668/**
669 * This function returns true if the_thread is the currently executing
670 * thread, and false otherwise.
671 */
672
673RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
674  const Thread_Control *the_thread
675)
676{
677  return ( the_thread == _Thread_Executing );
678}
679
#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  /* The is-executing indicator is maintained in the thread CPU context. */
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif
695
696/**
697 * This function returns true if the_thread is the heir
698 * thread, and false otherwise.
699 */
700
701RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
702  const Thread_Control *the_thread
703)
704{
705  return ( the_thread == _Thread_Heir );
706}
707
/**
 * @brief Clears any blocking state for the thread.
 *
 * It performs any necessary scheduling operations including the selection of
 * a new heir thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}
720
721/**
722 * This function returns true if the floating point context of
723 * the_thread is currently loaded in the floating point unit, and
724 * false otherwise.
725 */
726
727#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
728RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
729  const Thread_Control *the_thread
730)
731{
732  return ( the_thread == _Thread_Allocated_fp );
733}
734#endif
735
736/*
737 *  If the CPU has hardware floating point, then we must address saving
738 *  and restoring it as part of the context switch.
739 *
740 *  The second conditional compilation section selects the algorithm used
741 *  to context switch between floating point tasks.  The deferred algorithm
742 *  can be significantly better in a system with few floating point tasks
743 *  because it reduces the total number of save and restore FP context
744 *  operations.  However, this algorithm can not be used on all CPUs due
745 *  to unpredictable use of FP registers by some compilers for integer
746 *  operations.
747 */
748
/**
 * @brief Saves the floating point context of the executing thread, if any.
 *
 * With the deferred floating point switch algorithm the save happens lazily
 * in _Thread_Restore_fp(), so nothing is done here in that configuration.
 */
RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  /* Only threads with a floating point context need a save. */
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}
758
/**
 * @brief Restores the floating point context of the executing thread.
 *
 * With the deferred switch algorithm the restore is postponed until a
 * floating point thread other than the current owner of the floating point
 * unit executes: the owner's context is saved first and the executing thread
 * becomes the new owner.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    /* Evict the previous owner of the floating point unit, if any. */
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}
776
/**
 * @brief Releases ownership of the floating point unit.
 *
 * This routine is invoked when the currently loaded floating point context
 * is no longer associated with an active thread.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
788
789/**
790 * This function returns true if dispatching is disabled, and false
791 * otherwise.
792 */
793
794RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
795{
796  return ( _Thread_Dispatch_necessary );
797}
798
799/**
800 * This function returns true if the_thread is NULL and false otherwise.
801 */
802
803RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
804  const Thread_Control *the_thread
805)
806{
807  return ( the_thread == NULL );
808}
809
810/**
811 * @brief Is proxy blocking.
812 *
813 * status which indicates that a proxy is blocking, and false otherwise.
814 */
815RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
816  uint32_t   code
817)
818{
819  return (code == THREAD_STATUS_PROXY_BLOCKING);
820}
821
822RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
823{
824  /* Idle threads */
825  uint32_t maximum_internal_threads =
826    rtems_configuration_get_maximum_processors();
827
828  /* MPCI thread */
829#if defined(RTEMS_MULTIPROCESSING)
830  if ( _System_state_Is_multiprocessing ) {
831    ++maximum_internal_threads;
832  }
833#endif
834
835  return maximum_internal_threads;
836}
837
/**
 * @brief Allocates an internal thread object without allocator protection.
 *
 * The caller must provide the appropriate protection, see the _unprotected
 * object allocator convention.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}
843
844/**
845 * @brief Gets the heir of the processor and makes it executing.
846 *
847 * Must be called with interrupts disabled.  The thread dispatch necessary
848 * indicator is cleared as a side-effect.
849 *
850 * @return The heir thread.
851 *
852 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
853 * _Thread_Dispatch_update_heir().
854 */
855RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
856  Per_CPU_Control *cpu_self
857)
858{
859  Thread_Control *heir;
860
861  heir = cpu_self->heir;
862  cpu_self->dispatch_necessary = false;
863  cpu_self->executing = heir;
864
865  return heir;
866}
867
/**
 * @brief Charges the CPU time consumed since the last update to the thread
 * and refreshes the CPU usage timestamp of the processor.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  /* The consumed time is the interval between the two uptime samples. */
  last = cpu->cpu_usage_timestamp;
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}
881
#if defined( RTEMS_SMP )
/**
 * @brief Installs the new heir thread on the specified processor and
 * requests a thread dispatch.
 *
 * The CPU time used is charged to the previous heir before it is replaced.
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
}
#endif
896
897void _Thread_Get_CPU_time_used(
898  Thread_Control    *the_thread,
899  Timestamp_Control *cpu_time_used
900);
901
/**
 * @brief Initializes the thread action control with an empty action chain.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}
908
/**
 * @brief Initializes the thread action and marks it as not pending (its
 * node is set off chain).
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}
915
/**
 * @brief Adds a post-switch action to the thread and requests a thread
 * dispatch on the processor of the thread.
 *
 * The caller must own the thread state lock, see _Thread_State_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;

  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  /* The off-chain append ensures an action is enqueued at most once. */
  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
937
938RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
939  Thread_Life_state life_state
940)
941{
942  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
943}
944
945RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
946  Thread_Life_state life_state
947)
948{
949  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
950}
951
952RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
953  Thread_Life_state life_state
954)
955{
956  return ( life_state
957    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
958}
959
960RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
961  Thread_Life_state life_state
962)
963{
964  return ( life_state
965    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
966}
967
968RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
969  const Thread_Control *the_thread
970)
971{
972  _Assert( _Thread_State_is_owner( the_thread ) );
973  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
974}
975
976/**
977 * @brief Returns true if the thread owns resources, and false otherwise.
978 *
979 * Resources are accounted with the Thread_Control::resource_count resource
980 * counter.  This counter is used by mutex objects for example.
981 *
982 * @param[in] the_thread The thread.
983 */
984RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
985  const Thread_Control *the_thread
986)
987{
988  return the_thread->resource_count != 0;
989}
990
#if defined(RTEMS_SMP)
/**
 * @brief Removes the thread from the scheduler help list of the processor,
 * in case it is currently enqueued on it.
 *
 * The per-CPU lock protects the help list.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  _Per_CPU_Acquire( cpu );

  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
  }

  _Per_CPU_Release( cpu );
}
#endif
1007
/**
 * @brief Returns the home scheduler node of the thread.
 *
 * On SMP configurations this is the first node of the wait nodes chain,
 * which is asserted to be non-empty.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
    _Chain_First( &the_thread->Scheduler.Wait_nodes )
  );
#else
  return the_thread->Scheduler.nodes;
#endif
}
1021
/**
 * @brief Returns the scheduler node of the thread for the scheduler
 * instance with the specified index.
 *
 * On SMP configurations the nodes are stored contiguously, one node of
 * _Scheduler_Node_size bytes per scheduler instance.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
  const Thread_Control *the_thread,
  size_t                scheduler_index
)
{
#if defined(RTEMS_SMP)
  return (Scheduler_Node *)
    ( (uintptr_t) the_thread->Scheduler.nodes
      + scheduler_index * _Scheduler_Node_size );
#else
  /* There is exactly one scheduler instance on uni-processor systems. */
  _Assert( scheduler_index == 0 );
  (void) scheduler_index;
  return the_thread->Scheduler.nodes;
#endif
}
1037
1038#if defined(RTEMS_SMP)
/**
 * @brief Acquires the scheduler lock of the thread; interrupts must already
 * be disabled.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
}
1046
/**
 * @brief Releases the scheduler lock of the thread; interrupts remain
 * disabled.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
}
1054
1055#if defined(RTEMS_SMP)
1056void _Thread_Scheduler_ask_for_help( Thread_Control *the_thread );
1057
1058void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1059#endif
1060
/**
 * @brief Adds a scheduler node request for the thread.
 *
 * If no request is pending for this scheduler node, the node is pushed onto
 * the thread's singly-linked request list and the request is recorded.  If a
 * request is already pending and it is the opposite of the new one (an add
 * followed by a remove, or a remove followed by an add), the two cancel out:
 * the node stays on the list with a "nothing" request.  The thread scheduler
 * lock protects the request state and the list.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node of the request.
 * @param[in] request The request, either add or remove.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
  Thread_Control         *the_thread,
  Scheduler_Node         *scheduler_node,
  Scheduler_Node_request  request
)
{
  ISR_lock_Context       lock_context;
  Scheduler_Node_request current_request;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  current_request = scheduler_node->Thread.request;

  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
    _Assert(
      request == SCHEDULER_NODE_REQUEST_ADD
        || request == SCHEDULER_NODE_REQUEST_REMOVE
    );
    _Assert( scheduler_node->Thread.next_request == NULL );
    /* Push the node onto the thread's request list */
    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
    the_thread->Scheduler.requests = scheduler_node;
  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
    /* An opposite pending request cancels out with the new request */
    _Assert(
      ( current_request == SCHEDULER_NODE_REQUEST_ADD
        && request == SCHEDULER_NODE_REQUEST_REMOVE )
      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
        && request == SCHEDULER_NODE_REQUEST_ADD )
    );
    request = SCHEDULER_NODE_REQUEST_NOTHING;
  }

  scheduler_node->Thread.request = request;

  _Thread_Scheduler_release_critical( the_thread, &lock_context );
}
1096
/**
 * @brief Appends the scheduler node to the thread wait nodes and issues a
 * corresponding add request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to add.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Append_unprotected(
    &the_thread->Scheduler.Wait_nodes,
    &scheduler_node->Thread.Wait_node
  );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_ADD
  );
}
1112
/**
 * @brief Extracts the scheduler node from the thread wait nodes and issues a
 * corresponding remove request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to remove.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_REMOVE
  );
}
#endif
1126
1127/**
1128 * @brief Returns the priority of the thread.
1129 *
1130 * Returns the user API and thread wait information relevant thread priority.
1131 * This includes temporary thread priority adjustments due to locking
1132 * protocols, a job release or the POSIX sporadic server for example.
1133 *
1134 * @return The priority of the thread.
1135 */
1136RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1137  const Thread_Control *the_thread
1138)
1139{
1140  Scheduler_Node *scheduler_node;
1141
1142  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1143  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1144}
1145
/**
 * @brief Acquires the thread wait default lock inside a critical section
 * (interrupts disabled).
 *
 * This lock protects the thread wait state, e.g. Wait.queue,
 * Wait.operations and the chain of pending lock requests.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 *   release.
 *
 * @see _Thread_Wait_release_default_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
}
1163
/**
 * @brief Acquires the thread wait default lock and returns the executing
 * thread.
 *
 * Interrupts are disabled before the executing thread is determined, so the
 * thread cannot migrate to another processor in between.
 *
 * @param[in] lock_context The lock context used for the corresponding lock
 *   release.
 *
 * @return The executing thread.
 *
 * @see _Thread_Wait_release_default().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_Wait_acquire_default_critical( executing, lock_context );

  return executing;
}
1187
/**
 * @brief Disables interrupts and acquires the thread wait default lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 *   release.
 *
 * @see _Thread_Wait_release_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
}
1205
/**
 * @brief Releases the thread wait default lock inside a critical section
 * (interrupts disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 *   acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
}
1223
/**
 * @brief Releases the thread wait default lock and restores the previous
 * interrupt status.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 *   acquire.
 *
 * @see _Thread_Wait_acquire_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Wait_release_default_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
1240
#if defined(RTEMS_SMP)
/* Maps a pending request chain node back to its thread queue context */
#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )

/**
 * @brief Removes the request gate from the thread's pending request chain
 * and opens the gate of the next pending request, if any.
 *
 * The caller must hold the thread wait default lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_lock_context The lock context containing the gate to
 *   remove.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
  Thread_Control            *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
  Chain_Node *first;

  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );

  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
    /* Let the next pending request proceed */
    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
  }
}
1259
/**
 * @brief Acquires the thread queue lock inside a critical section
 * (interrupts disabled).
 *
 * Lock statistics are accounted to the executing thread.
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The lock context for the corresponding
 *   _Thread_Wait_release_queue_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_acquire_critical(
    queue,
    &_Thread_Executing->Potpourri_stats,
    &queue_lock_context->Lock_context
  );
}
1271
/**
 * @brief Releases the thread queue lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The lock context of the corresponding
 *   _Thread_Wait_acquire_queue_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_release_critical(
    queue,
    &queue_lock_context->Lock_context
  );
}
#endif
1283
/**
 * @brief Acquires the thread wait lock inside a critical section (interrupts
 * disabled).
 *
 * On SMP configurations this acquires the default lock, registers the
 * request as a gate on the pending request chain if the thread currently
 * waits on a thread queue, and then acquires that thread queue lock.  If the
 * queue was detached from the thread while the queue lock was obtained
 * (observable via the queue pointer in the lock context being set to NULL by
 * _Thread_Wait_restore_default()), it falls back to holding only the
 * default lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_context The thread queue context for the corresponding
 *   _Thread_Wait_release_critical().
 *
 * @see _Thread_queue_Context_initialize().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  _Thread_Wait_acquire_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );

  /* Snapshot the current wait queue under the default lock */
  queue = the_thread->Wait.queue;
  queue_context->Lock_context.Wait.queue = queue;

  if ( queue != NULL ) {
    /* Register our request before dropping the default lock */
    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &queue_context->Lock_context.Wait.Gate
    );
    _Thread_Wait_release_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );

    if ( queue_context->Lock_context.Wait.queue == NULL ) {
      /*
       * The queue was detached from the thread in the meantime, e.g. by
       * _Thread_Wait_restore_default().  Drop the stale queue lock, retake
       * the default lock and withdraw our pending request.
       */
      _Thread_Wait_release_queue_critical(
        queue,
        &queue_context->Lock_context
      );
      _Thread_Wait_acquire_default_critical(
        the_thread,
        &queue_context->Lock_context.Lock_context
      );
      _Thread_Wait_remove_request_locked(
        the_thread,
        &queue_context->Lock_context
      );
      _Assert( the_thread->Wait.queue == NULL );
    }
  }
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
1342
/**
 * @brief Initializes the thread queue context, disables interrupts, and
 * acquires the thread wait lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_context The thread queue context for the corresponding
 *   _Thread_Wait_release().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Context_initialize( queue_context );
  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
  _Thread_Wait_acquire_critical( the_thread, queue_context );
}
1359
/**
 * @brief Releases the thread wait lock inside a critical section (interrupts
 * disabled).
 *
 * The previous interrupt status is not restored.
 *
 * On SMP configurations, if a thread queue lock is held (queue pointer in
 * the lock context is not NULL), it is released first, then the default lock
 * is retaken to withdraw the pending request, and finally the default lock
 * is released.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_context The thread queue context used for corresponding
 *   _Thread_Wait_acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  queue = queue_context->Lock_context.Wait.queue;

  if ( queue != NULL ) {
    _Thread_Wait_release_queue_critical(
      queue, &queue_context->Lock_context
    );
    _Thread_Wait_acquire_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    /* Withdraw the request gate registered by the acquire */
    _Thread_Wait_remove_request_locked(
      the_thread,
      &queue_context->Lock_context
    );
  }

  _Thread_Wait_release_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
1403
/**
 * @brief Releases the thread wait lock and restores the previous interrupt
 * status.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_context The thread queue context used for corresponding
 *   _Thread_Wait_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  _Thread_Wait_release_critical( the_thread, queue_context );
  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
}
1420
/**
 * @brief Claims the thread wait queue.
 *
 * The caller must not be the owner of the default thread wait lock.  The
 * caller must be the owner of the corresponding thread queue lock.  The
 * registration of the corresponding thread queue operations is deferred and
 * done after the deadlock detection.  This is crucial to support timeouts on
 * SMP configurations.
 *
 * On SMP configurations, the pending request chain is reset and the
 * tranquilizer gate is closed; it is opened again once the default wait
 * state is restored, see _Thread_Wait_restore_default() and
 * _Thread_Wait_tranquilize().
 *
 * @param[in] the_thread The thread.
 * @param[in] queue The new thread queue.
 *
 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  ISR_lock_Context lock_context;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  _Assert( the_thread->Wait.queue == NULL );

#if defined(RTEMS_SMP)
  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
#endif

  the_thread->Wait.queue = queue;

  _Thread_Wait_release_default_critical( the_thread, &lock_context );
}
1456
/**
 * @brief Finalizes the thread wait queue claim via registration of the
 * corresponding thread queue operations.
 *
 * @param[in] the_thread The thread.
 * @param[in] operations The corresponding thread queue operations.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *operations
)
{
  the_thread->Wait.operations = operations;
}
1471
/**
 * @brief Removes a thread wait lock request.
 *
 * On SMP configurations, removes a thread wait lock request while holding
 * the thread wait default lock.
 *
 * On other configurations, this function does nothing.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_lock_context The thread queue lock context used for
 *   corresponding _Thread_Wait_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
  Thread_Control            *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context lock_context;

  _Thread_Wait_acquire_default( the_thread, &lock_context );
  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
  _Thread_Wait_release_default( the_thread, &lock_context );
#else
  (void) the_thread;
  (void) queue_lock_context;
#endif
}
1499
/**
 * @brief Restores the default thread wait queue and operations.
 *
 * The caller must be the owner of the current thread wait queue lock.
 *
 * On SMP configurations, all pending requests are invalidated (their queue
 * pointer is set to NULL so _Thread_Wait_acquire_critical() detects the
 * detach), and the tranquilizer gate is either appended to the pending
 * request chain (opened later by the last request leaving the chain) or
 * opened immediately when no requests are pending.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Thread_Wait_claim().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context  lock_context;
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );

  if ( node != tail ) {
    /* Invalidate every pending request so it observes the detach */
    do {
      Thread_queue_Context *queue_context;

      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
      queue_context->Lock_context.Wait.queue = NULL;

      node = _Chain_Next( node );
    } while ( node != tail );

    /* The tranquilizer gate opens once all pending requests are gone */
    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &the_thread->Wait.Lock.Tranquilizer
    );
  } else {
    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
  }
#endif

  the_thread->Wait.queue = NULL;
  the_thread->Wait.operations = &_Thread_queue_Operations_default;

#if defined(RTEMS_SMP)
  _Thread_Wait_release_default_critical( the_thread, &lock_context );
#endif
}
1552
/**
 * @brief Tranquilizes the thread after a wait on a thread queue.
 *
 * After the violent blocking procedure this function makes the thread calm and
 * peaceful again so that it can carry out its normal work.
 *
 * On SMP configurations, ensures that all pending thread wait lock requests
 * completed before the thread is able to begin a new thread wait procedure.
 * This is done by waiting on the tranquilizer gate closed by
 * _Thread_Wait_claim().
 *
 * On other configurations, this function does nothing.
 *
 * It must be called after a _Thread_Wait_claim() exactly once
 *  - after the corresponding thread queue lock was released, and
 *  - the default wait state is restored or some other processor is about to do
 *    this.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
#else
  (void) the_thread;
#endif
}
1581
/**
 * @brief Cancels a thread wait on a thread queue.
 *
 * Extracts the thread from its current wait queue via the registered
 * operations and restores the default wait state.  On SMP configurations,
 * this is done only if the thread still waits on a queue, and the queue
 * pointer in the lock context is restored afterwards so that
 * _Thread_Wait_release() still releases the correct queue lock.
 *
 * Note that the braces of the if statement pair across the preprocessor
 * branches: on non-SMP configurations the extract runs unconditionally.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_context The thread queue context used for corresponding
 *   _Thread_Wait_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Queue *queue;

  queue = the_thread->Wait.queue;

#if defined(RTEMS_SMP)
  if ( queue != NULL ) {
    _Assert( queue_context->Lock_context.Wait.queue == queue );
#endif

    ( *the_thread->Wait.operations->extract )(
      queue,
      the_thread,
      queue_context
    );
    _Thread_Wait_restore_default( the_thread );

#if defined(RTEMS_SMP)
    _Assert( queue_context->Lock_context.Wait.queue == NULL );
    queue_context->Lock_context.Wait.queue = queue;
  }
#endif
}
1616
/**
 * @brief The initial thread wait flags value set by _Thread_Initialize().
 */
#define THREAD_WAIT_FLAGS_INITIAL 0x0U

/**
 * @brief Mask to get the thread wait state flags.
 *
 * The wait state flags occupy the least-significant byte, the wait class
 * flags the next byte, see THREAD_WAIT_CLASS_MASK.
 */
#define THREAD_WAIT_STATE_MASK 0xffU

/**
 * @brief Indicates that the thread begins with the blocking operation.
 *
 * A blocking operation consists of an optional watchdog initialization and the
 * setting of the appropriate thread blocking state with the corresponding
 * scheduler block operation.
 */
#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U

/**
 * @brief Indicates that the thread completed the blocking operation.
 */
#define THREAD_WAIT_STATE_BLOCKED 0x2U

/**
 * @brief Indicates that a condition to end the thread wait occurred.
 *
 * This could be a timeout, a signal, an event or a resource availability.
 */
#define THREAD_WAIT_STATE_READY_AGAIN 0x4U

/**
 * @brief Mask to get the thread wait class flags.
 */
#define THREAD_WAIT_CLASS_MASK 0xff00U

/**
 * @brief Indicates that the thread waits for an event.
 */
#define THREAD_WAIT_CLASS_EVENT 0x100U

/**
 * @brief Indicates that the thread waits for a system event.
 */
#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U

/**
 * @brief Indicates that the thread waits for an object.
 */
#define THREAD_WAIT_CLASS_OBJECT 0x400U

/**
 * @brief Indicates that the thread waits for a period.
 */
#define THREAD_WAIT_CLASS_PERIOD 0x800U
1672
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations, the store uses relaxed atomic ordering.
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The new thread wait flags.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
1684
/**
 * @brief Returns the thread wait flags.
 *
 * On SMP configurations, the load uses relaxed atomic ordering.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
1695
/**
 * @brief Returns the thread wait flags with acquire semantics.
 *
 * On SMP configurations, the load uses acquire atomic ordering so that
 * reads after this call observe writes made before the corresponding
 * release store.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
#else
  return the_thread->Wait.flags;
#endif
}
1706
/**
 * @brief Tries to change the thread wait flags with release semantics in case
 * of success.
 *
 * Must be called inside a critical section (interrupts disabled).
 *
 * In case the wait flags are equal to the expected wait flags, then the wait
 * flags are set to the desired wait flags.
 *
 * @param[in] the_thread The thread.
 * @param[in] expected_flags The expected wait flags.
 * @param[in] desired_flags The desired wait flags.
 *
 * @retval true The wait flags were equal to the expected wait flags.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  /* The caller must have interrupts disabled */
  _Assert( _ISR_Get_level() != 0 );

#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_RELEASE,
    ATOMIC_ORDER_RELAXED
  );
#else
  bool success = ( the_thread->Wait.flags == expected_flags );

  if ( success ) {
    the_thread->Wait.flags = desired_flags;
  }

  return success;
#endif
}
1749
1750/**
1751 * @brief Tries to change the thread wait flags with acquire semantics.
1752 *
1753 * In case the wait flags are equal to the expected wait flags, then the wait
1754 * flags are set to the desired wait flags.
1755 *
1756 * @param[in] the_thread The thread.
1757 * @param[in] expected_flags The expected wait flags.
1758 * @param[in] desired_flags The desired wait flags.
1759 *
1760 * @retval true The wait flags were equal to the expected wait flags.
1761 * @retval false Otherwise.
1762 */
1763RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
1764  Thread_Control    *the_thread,
1765  Thread_Wait_flags  expected_flags,
1766  Thread_Wait_flags  desired_flags
1767)
1768{
1769  bool success;
1770#if defined(RTEMS_SMP)
1771  return _Atomic_Compare_exchange_uint(
1772    &the_thread->Wait.flags,
1773    &expected_flags,
1774    desired_flags,
1775    ATOMIC_ORDER_ACQUIRE,
1776    ATOMIC_ORDER_ACQUIRE
1777  );
1778#else
1779  ISR_Level level;
1780
1781  _ISR_Local_disable( level );
1782
1783  success = _Thread_Wait_flags_try_change_release(
1784    the_thread,
1785    expected_flags,
1786    desired_flags
1787  );
1788
1789  _ISR_Local_enable( level );
1790#endif
1791
1792  return success;
1793}
1794
1795/**
1796 * @brief Returns the object identifier of the object containing the current
1797 * thread wait queue.
1798 *
1799 * This function may be used for debug and system information purposes.  The
1800 * caller must be the owner of the thread lock.
1801 *
1802 * @retval 0 The thread waits on no thread queue currently, the thread wait
1803 *   queue is not contained in an object, or the current thread state provides
1804 *   insufficient information, e.g. the thread is in the middle of a blocking
1805 *   operation.
1806 * @retval other The object identifier of the object containing the thread wait
1807 *   queue.
1808 */
1809Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1810
/**
 * @brief Returns the thread wait return code as a status code.
 *
 * @param[in] the_thread The thread.
 *
 * @return The status of the last thread wait operation.
 */
RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
  const Thread_Control *the_thread
)
{
  return (Status_Control) the_thread->Wait.return_code;
}
1817
1818/**
1819 * @brief General purpose thread wait timeout.
1820 *
1821 * @param[in] watchdog The thread timer watchdog.
1822 */
1823void _Thread_Timeout( Watchdog_Control *watchdog );
1824
/**
 * @brief Initializes the thread timer information.
 *
 * The watchdog header defaults to the relative (ticks based) header of the
 * given processor.
 *
 * @param[in] timer The thread timer information to initialize.
 * @param[in] cpu The processor providing the watchdog headers.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
1834
/**
 * @brief Inserts the thread timer as a relative (ticks based) watchdog.
 *
 * The thread timer lock protects the header, routine and watchdog update.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor for the watchdog.
 * @param[in] routine The watchdog service routine, e.g. _Thread_Timeout().
 * @param[in] ticks The watchdog interval in ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  Watchdog_Interval               ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1853
/**
 * @brief Inserts the thread timer as an absolute watchdog.
 *
 * The thread timer lock protects the header, routine and watchdog update.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor for the watchdog.
 * @param[in] routine The watchdog service routine, e.g. _Thread_Timeout().
 * @param[in] expire The absolute expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1872
/**
 * @brief Removes the thread timer watchdog.
 *
 * On SMP configurations, the watchdog is removed from the processor it was
 * inserted on; on other configurations, from the current processor.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1891
/**
 * @brief Tranquilizes the thread, removes its timer, and unblocks it.
 *
 * On multiprocessing configurations, remote threads are unblocked through
 * the thread queue proxy instead of directly.
 *
 * @param[in] the_thread The thread to unblock.
 * @param[in] queue The thread queue the thread waited on; only used on
 *   multiprocessing configurations.
 */
RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  /* Wait for pending wait lock requests before the state change */
  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
1911
1912/** @}*/
1913
1914#ifdef __cplusplus
1915}
1916#endif
1917
1918#if defined(RTEMS_MULTIPROCESSING)
1919#include <rtems/score/threadmp.h>
1920#endif
1921
1922#endif
1923/* end of include file */