source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 02878626

5
Last change on this file since 02878626 was 02878626, checked in by Sebastian Huber <sebastian.huber@…>, on 10/17/17 at 14:15:31

score: Add _Thread_Add_timeout_ticks()

Replace _Thread_Timer_insert_monotonic() with
_Thread_Add_timeout_ticks().

Update #3117.
Update #3182.

  • Property mode set to 100644
File size: 51.1 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2017 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/threadqimpl.h>
35#include <rtems/score/todimpl.h>
36#include <rtems/score/freechain.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44/**
45 * @addtogroup ScoreThread
46 */
47/**@{**/
48
49/**
50 *  The following structure contains the information necessary to manage
51 *  a thread which it is  waiting for a resource.
52 */
53#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
54
55/**
56 *  Self for the GNU Ada Run-Time
57 */
58extern void *rtems_ada_self;
59
60typedef struct {
61  Objects_Information Objects;
62
63  Freechain_Control Free_thread_queue_heads;
64} Thread_Information;
65
66/**
67 *  The following defines the information control block used to
68 *  manage this class of objects.
69 */
70extern Thread_Information _Thread_Internal_information;
71
72/**
73 *  The following points to the thread whose floating point
74 *  context is currently loaded.
75 */
76#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
77extern Thread_Control *_Thread_Allocated_fp;
78#endif
79
80#if defined(RTEMS_SMP)
81#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
82  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
83#endif
84
85typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
86
87void _Thread_Iterate(
88  Thread_Visitor  visitor,
89  void           *arg
90);
91
92void _Thread_Initialize_information(
93  Thread_Information  *information,
94  Objects_APIs         the_api,
95  uint16_t             the_class,
96  uint32_t             maximum,
97  bool                 is_string,
98  uint32_t             maximum_name_length
99);
100
101/**
102 *  @brief Initialize thread handler.
103 *
104 *  This routine performs the initialization necessary for this handler.
105 */
106void _Thread_Handler_initialization(void);
107
108/**
109 *  @brief Create idle thread.
110 *
111 *  This routine creates the idle thread.
112 *
113 *  @warning No thread should be created before this one.
114 */
115void _Thread_Create_idle(void);
116
117/**
118 *  @brief Start thread multitasking.
119 *
120 *  This routine initiates multitasking.  It is invoked only as
121 *  part of initialization and its invocation is the last act of
122 *  the non-multitasking part of the system initialization.
123 */
124void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
125
126/**
127 *  @brief Allocate the requested stack space for the thread.
128 *
129 *  Allocate the requested stack space for the thread.
130 *  Set the Start.stack field to the address of the stack.
131 *
132 *  @param[in] the_thread is the thread where the stack space is requested
133 *  @param[in] stack_size is the stack space is requested
134 *
135 *  @retval actual size allocated after any adjustment
136 *  @retval zero if the allocation failed
137 */
138size_t _Thread_Stack_Allocate(
139  Thread_Control *the_thread,
140  size_t          stack_size
141);
142
143/**
144 *  @brief Deallocate thread stack.
145 *
146 *  Deallocate the Thread's stack.
147 */
148void _Thread_Stack_Free(
149  Thread_Control *the_thread
150);
151
152/**
153 *  @brief Initialize thread.
154 *
155 *  This routine initializes the specified the thread.  It allocates
156 *  all memory associated with this thread.  It completes by adding
157 *  the thread to the local object table so operations on this
158 *  thread id are allowed.
159 *
160 *  @note If stack_area is NULL, it is allocated from the workspace.
161 *
162 *  @note If the stack is allocated from the workspace, then it is
163 *        guaranteed to be of at least minimum size.
164 */
165bool _Thread_Initialize(
166  Thread_Information                   *information,
167  Thread_Control                       *the_thread,
168  const struct _Scheduler_Control      *scheduler,
169  void                                 *stack_area,
170  size_t                                stack_size,
171  bool                                  is_fp,
172  Priority_Control                      priority,
173  bool                                  is_preemptible,
174  Thread_CPU_budget_algorithms          budget_algorithm,
175  Thread_CPU_budget_algorithm_callout   budget_callout,
176  uint32_t                              isr_level,
177  Objects_Name                          name
178);
179
180/**
181 *  @brief Initializes thread and executes it.
182 *
183 *  This routine initializes the executable information for a thread
184 *  and makes it ready to execute.  After this routine executes, the
185 *  thread competes with all other threads for CPU time.
186 *
187 *  @param the_thread The thread to be started.
188 *  @param entry The thread entry information.
189 */
190bool _Thread_Start(
191  Thread_Control                 *the_thread,
192  const Thread_Entry_information *entry,
193  ISR_lock_Context               *lock_context
194);
195
196void _Thread_Restart_self(
197  Thread_Control                 *executing,
198  const Thread_Entry_information *entry,
199  ISR_lock_Context               *lock_context
200) RTEMS_NO_RETURN;
201
202bool _Thread_Restart_other(
203  Thread_Control                 *the_thread,
204  const Thread_Entry_information *entry,
205  ISR_lock_Context               *lock_context
206);
207
208void _Thread_Yield( Thread_Control *executing );
209
210Thread_Life_state _Thread_Change_life(
211  Thread_Life_state clear,
212  Thread_Life_state set,
213  Thread_Life_state ignore
214);
215
216Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
217
218/**
219 * @brief Kills all zombie threads in the system.
220 *
221 * Threads change into the zombie state as the last step in the thread
222 * termination sequence right before a context switch to the heir thread is
223 * initiated.  Since the thread stack is still in use during this phase we have
224 * to postpone the thread stack reclamation until this point.  On SMP
225 * configurations we may have to busy wait for context switch completion here.
226 */
227void _Thread_Kill_zombies( void );
228
229void _Thread_Exit(
230  Thread_Control    *executing,
231  Thread_Life_state  set,
232  void              *exit_value
233);
234
235void _Thread_Join(
236  Thread_Control       *the_thread,
237  States_Control        waiting_for_join,
238  Thread_Control       *executing,
239  Thread_queue_Context *queue_context
240);
241
242void _Thread_Cancel(
243  Thread_Control *the_thread,
244  Thread_Control *executing,
245  void           *exit_value
246);
247
248typedef struct {
249  Thread_queue_Context  Base;
250  Thread_Control       *cancel;
251} Thread_Close_context;
252
253/**
254 * @brief Closes the thread.
255 *
256 * Closes the thread object and starts the thread termination sequence.  In
257 * case the executing thread is not terminated, then this function waits until
258 * the terminating thread reached the zombie state.
259 */
260void _Thread_Close(
261  Thread_Control       *the_thread,
262  Thread_Control       *executing,
263  Thread_Close_context *context
264);
265
266RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
267{
268  return _States_Is_ready( the_thread->current_state );
269}
270
271States_Control _Thread_Clear_state_locked(
272  Thread_Control *the_thread,
273  States_Control  state
274);
275
276/**
277 * @brief Clears the specified thread state.
278 *
279 * In case the previous state is a non-ready state and the next state is the
280 * ready state, then the thread is unblocked by the scheduler.
281 *
282 * @param[in] the_thread The thread.
283 * @param[in] state The state to clear.  It must not be zero.
284 *
285 * @return The previous state.
286 */
287States_Control _Thread_Clear_state(
288  Thread_Control *the_thread,
289  States_Control  state
290);
291
292States_Control _Thread_Set_state_locked(
293  Thread_Control *the_thread,
294  States_Control  state
295);
296
297/**
298 * @brief Sets the specified thread state.
299 *
300 * In case the previous state is the ready state, then the thread is blocked by
301 * the scheduler.
302 *
303 * @param[in] the_thread The thread.
304 * @param[in] state The state to set.  It must not be zero.
305 *
306 * @return The previous state.
307 */
308States_Control _Thread_Set_state(
309  Thread_Control *the_thread,
310  States_Control  state
311);
312
313/**
314 *  @brief Initializes enviroment for a thread.
315 *
316 *  This routine initializes the context of @a the_thread to its
317 *  appropriate starting state.
318 *
319 *  @param[in] the_thread is the pointer to the thread control block.
320 */
321void _Thread_Load_environment(
322  Thread_Control *the_thread
323);
324
325void _Thread_Entry_adaptor_idle( Thread_Control *executing );
326
327void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
328
329void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
330
331/**
332 *  @brief Wrapper function for all threads.
333 *
334 *  This routine is the wrapper function for all threads.  It is
335 *  the starting point for all threads.  The user provided thread
336 *  entry point is invoked by this routine.  Operations
337 *  which must be performed immediately before and after the user's
338 *  thread executes are found here.
339 *
340 *  @note On entry, it is assumed all interrupts are blocked and that this
341 *  routine needs to set the initial isr level.  This may or may not
342 *  actually be needed by the context switch routine and as a result
343 *  interrupts may already be at there proper level.  Either way,
344 *  setting the initial isr level properly here is safe.
345 */
346void _Thread_Handler( void );
347
348/**
349 * @brief Executes the global constructors and then restarts itself as the
350 * first initialization thread.
351 *
352 * The first initialization thread is the first RTEMS initialization task or
353 * the first POSIX initialization thread in case no RTEMS initialization tasks
354 * are present.
355 */
356void _Thread_Global_construction(
357  Thread_Control                 *executing,
358  const Thread_Entry_information *entry
359) RTEMS_NO_RETURN;
360
361RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
362  Thread_Control   *the_thread,
363  ISR_lock_Context *lock_context
364)
365{
366  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
367}
368
369RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
370  Thread_Control   *the_thread,
371  ISR_lock_Context *lock_context
372)
373{
374  _ISR_lock_ISR_disable( lock_context );
375  _Thread_State_acquire_critical( the_thread, lock_context );
376}
377
378RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
379  ISR_lock_Context *lock_context
380)
381{
382  Thread_Control *executing;
383
384  _ISR_lock_ISR_disable( lock_context );
385  executing = _Thread_Executing;
386  _Thread_State_acquire_critical( executing, lock_context );
387
388  return executing;
389}
390
391RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
392  Thread_Control   *the_thread,
393  ISR_lock_Context *lock_context
394)
395{
396  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
397}
398
399RTEMS_INLINE_ROUTINE void _Thread_State_release(
400  Thread_Control   *the_thread,
401  ISR_lock_Context *lock_context
402)
403{
404  _Thread_State_release_critical( the_thread, lock_context );
405  _ISR_lock_ISR_enable( lock_context );
406}
407
408#if defined(RTEMS_DEBUG)
409RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
410  const Thread_Control *the_thread
411)
412{
413  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
414}
415#endif
416
417/**
418 * @brief Performs the priority actions specified by the thread queue context
419 * along the thread queue path.
420 *
421 * The caller must be the owner of the thread wait lock.
422 *
423 * @param start_of_path The start thread of the thread queue path.
424 * @param queue_context The thread queue context specifying the thread queue
425 *   path and initial thread priority actions.
426 *
427 * @see _Thread_queue_Path_acquire_critical().
428 */
429void _Thread_Priority_perform_actions(
430  Thread_Control       *start_of_path,
431  Thread_queue_Context *queue_context
432);
433
434/**
435 * @brief Adds the specified thread priority node to the corresponding thread
436 * priority aggregation.
437 *
438 * The caller must be the owner of the thread wait lock.
439 *
440 * @param the_thread The thread.
441 * @param priority_node The thread priority node to add.
442 * @param queue_context The thread queue context to return an updated set of
443 *   threads for _Thread_Priority_update().  The thread queue context must be
444 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
445 *   call of this function.
446 *
447 * @see _Thread_Wait_acquire().
448 */
449void _Thread_Priority_add(
450  Thread_Control       *the_thread,
451  Priority_Node        *priority_node,
452  Thread_queue_Context *queue_context
453);
454
455/**
456 * @brief Removes the specified thread priority node from the corresponding
457 * thread priority aggregation.
458 *
459 * The caller must be the owner of the thread wait lock.
460 *
461 * @param the_thread The thread.
462 * @param priority_node The thread priority node to remove.
463 * @param queue_context The thread queue context to return an updated set of
464 *   threads for _Thread_Priority_update().  The thread queue context must be
465 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
466 *   call of this function.
467 *
468 * @see _Thread_Wait_acquire().
469 */
470void _Thread_Priority_remove(
471  Thread_Control       *the_thread,
472  Priority_Node        *priority_node,
473  Thread_queue_Context *queue_context
474);
475
476/**
477 * @brief Propagates a thread priority value change in the specified thread
478 * priority node to the corresponding thread priority aggregation.
479 *
480 * The caller must be the owner of the thread wait lock.
481 *
482 * @param the_thread The thread.
483 * @param priority_node The thread priority node to change.
484 * @param prepend_it In case this is true, then the thread is prepended to
485 *   its priority group in its home scheduler instance, otherwise it is
486 *   appended.
487 * @param queue_context The thread queue context to return an updated set of
488 *   threads for _Thread_Priority_update().  The thread queue context must be
489 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
490 *   call of this function.
491 *
492 * @see _Thread_Wait_acquire().
493 */
494void _Thread_Priority_changed(
495  Thread_Control       *the_thread,
496  Priority_Node        *priority_node,
497  bool                  prepend_it,
498  Thread_queue_Context *queue_context
499);
500
501/**
502 * @brief Changes the thread priority value of the specified thread priority
503 * node in the corresponding thread priority aggregation.
504 *
505 * The caller must be the owner of the thread wait lock.
506 *
507 * @param the_thread The thread.
508 * @param priority_node The thread priority node to change.
509 * @param new_priority The new thread priority value of the thread priority
510 *   node to change.
511 * @param prepend_it In case this is true, then the thread is prepended to
512 *   its priority group in its home scheduler instance, otherwise it is
513 *   appended.
514 * @param queue_context The thread queue context to return an updated set of
515 *   threads for _Thread_Priority_update().  The thread queue context must be
516 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
517 *   call of this function.
518 *
519 * @see _Thread_Wait_acquire().
520 */
521RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
522  Thread_Control       *the_thread,
523  Priority_Node        *priority_node,
524  Priority_Control      new_priority,
525  bool                  prepend_it,
526  Thread_queue_Context *queue_context
527)
528{
529  _Priority_Node_set_priority( priority_node, new_priority );
530  _Thread_Priority_changed(
531    the_thread,
532    priority_node,
533    prepend_it,
534    queue_context
535  );
536}
537
538/**
539 * @brief Replaces the victim priority node with the replacement priority node
540 * in the corresponding thread priority aggregation.
541 *
542 * The caller must be the owner of the thread wait lock.
543 *
544 * @param the_thread The thread.
545 * @param victim_node The victim thread priority node.
546 * @param replacement_node The replacement thread priority node.
547 *
548 * @see _Thread_Wait_acquire().
549 */
550void _Thread_Priority_replace(
551  Thread_Control *the_thread,
552  Priority_Node  *victim_node,
553  Priority_Node  *replacement_node
554);
555
556/**
557 * @brief Adds a priority node to the corresponding thread priority
558 * aggregation.
559 *
560 * The caller must be the owner of the thread wait lock.
561 *
562 * @param the_thread The thread.
563 * @param priority_node The thread priority node to add.
564 * @param queue_context The thread queue context to return an updated set of
565 *   threads for _Thread_Priority_update().  The thread queue context must be
566 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
567 *   call of this function.
568 *
569 * @see _Thread_Priority_add(), _Thread_Priority_change(),
570 *   _Thread_Priority_changed() and _Thread_Priority_remove().
571 */
572void _Thread_Priority_update( Thread_queue_Context *queue_context );
573
574#if defined(RTEMS_SMP)
575void _Thread_Priority_and_sticky_update(
576  Thread_Control *the_thread,
577  int             sticky_level_change
578);
579#endif
580
581/**
582 * @brief Returns true if the left thread priority is less than the right
583 * thread priority in the intuitive sense of priority and false otherwise.
584 */
585RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
586  Priority_Control left,
587  Priority_Control right
588)
589{
590  return left > right;
591}
592
593/**
594 * @brief Returns the highest priority of the left and right thread priorities
595 * in the intuitive sense of priority.
596 */
597RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
598  Priority_Control left,
599  Priority_Control right
600)
601{
602  return _Thread_Priority_less_than( left, right ) ? right : left;
603}
604
605RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
606  Objects_Id id
607)
608{
609  uint32_t the_api;
610
611  the_api = _Objects_Get_API( id );
612
613  if ( !_Objects_Is_api_valid( the_api ) ) {
614    return NULL;
615  }
616
617  /*
618   * Threads are always first class :)
619   *
620   * There is no need to validate the object class of the object identifier,
621   * since this will be done by the object get methods.
622   */
623  return _Objects_Information_table[ the_api ][ 1 ];
624}
625
626/**
627 * @brief Gets a thread by its identifier.
628 *
629 * @see _Objects_Get().
630 */
631Thread_Control *_Thread_Get(
632  Objects_Id         id,
633  ISR_lock_Context  *lock_context
634);
635
636RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
637  const Thread_Control *thread
638)
639{
640#if defined(RTEMS_SMP)
641  return thread->Scheduler.cpu;
642#else
643  (void) thread;
644
645  return _Per_CPU_Get();
646#endif
647}
648
649RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
650  Thread_Control *thread,
651  Per_CPU_Control *cpu
652)
653{
654#if defined(RTEMS_SMP)
655  thread->Scheduler.cpu = cpu;
656#else
657  (void) thread;
658  (void) cpu;
659#endif
660}
661
662/**
663 * This function returns true if the_thread is the currently executing
664 * thread, and false otherwise.
665 */
666
667RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
668  const Thread_Control *the_thread
669)
670{
671  return ( the_thread == _Thread_Executing );
672}
673
674#if defined(RTEMS_SMP)
675/**
676 * @brief Returns @a true in case the thread executes currently on some
677 * processor in the system, otherwise @a false.
678 *
679 * Do not confuse this with _Thread_Is_executing() which checks only the
680 * current processor.
681 */
682RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
683  const Thread_Control *the_thread
684)
685{
686  return _CPU_Context_Get_is_executing( &the_thread->Registers );
687}
688#endif
689
690/**
691 * This function returns true if the_thread is the heir
692 * thread, and false otherwise.
693 */
694
695RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
696  const Thread_Control *the_thread
697)
698{
699  return ( the_thread == _Thread_Heir );
700}
701
702/**
703 * This routine clears any blocking state for the_thread.  It performs
704 * any necessary scheduling operations including the selection of
705 * a new heir thread.
706 */
707
708RTEMS_INLINE_ROUTINE void _Thread_Unblock (
709  Thread_Control *the_thread
710)
711{
712  _Thread_Clear_state( the_thread, STATES_BLOCKED );
713}
714
715/**
716 * This function returns true if the floating point context of
717 * the_thread is currently loaded in the floating point unit, and
718 * false otherwise.
719 */
720
721#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
722RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
723  const Thread_Control *the_thread
724)
725{
726  return ( the_thread == _Thread_Allocated_fp );
727}
728#endif
729
730/*
731 *  If the CPU has hardware floating point, then we must address saving
732 *  and restoring it as part of the context switch.
733 *
734 *  The second conditional compilation section selects the algorithm used
735 *  to context switch between floating point tasks.  The deferred algorithm
736 *  can be significantly better in a system with few floating point tasks
737 *  because it reduces the total number of save and restore FP context
738 *  operations.  However, this algorithm can not be used on all CPUs due
739 *  to unpredictable use of FP registers by some compilers for integer
740 *  operations.
741 */
742
743RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
744{
745#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
746#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
747  if ( executing->fp_context != NULL )
748    _Context_Save_fp( &executing->fp_context );
749#endif
750#endif
751}
752
753RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
754{
755#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
756#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
757  if ( (executing->fp_context != NULL) &&
758       !_Thread_Is_allocated_fp( executing ) ) {
759    if ( _Thread_Allocated_fp != NULL )
760      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
761    _Context_Restore_fp( &executing->fp_context );
762    _Thread_Allocated_fp = executing;
763  }
764#else
765  if ( executing->fp_context != NULL )
766    _Context_Restore_fp( &executing->fp_context );
767#endif
768#endif
769}
770
771/**
772 * This routine is invoked when the currently loaded floating
773 * point context is now longer associated with an active thread.
774 */
775
776#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
777RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
778{
779  _Thread_Allocated_fp = NULL;
780}
781#endif
782
783/**
784 * This function returns true if dispatching is disabled, and false
785 * otherwise.
786 */
787
788RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
789{
790  return ( _Thread_Dispatch_necessary );
791}
792
793/**
794 * This function returns true if the_thread is NULL and false otherwise.
795 */
796
797RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
798  const Thread_Control *the_thread
799)
800{
801  return ( the_thread == NULL );
802}
803
804/**
805 * @brief Is proxy blocking.
806 *
807 * status which indicates that a proxy is blocking, and false otherwise.
808 */
809RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
810  uint32_t   code
811)
812{
813  return (code == THREAD_STATUS_PROXY_BLOCKING);
814}
815
816RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
817{
818  /* Idle threads */
819  uint32_t maximum_internal_threads =
820    rtems_configuration_get_maximum_processors();
821
822  /* MPCI thread */
823#if defined(RTEMS_MULTIPROCESSING)
824  if ( _System_state_Is_multiprocessing ) {
825    ++maximum_internal_threads;
826  }
827#endif
828
829  return maximum_internal_threads;
830}
831
832RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
833{
834  return (Thread_Control *)
835    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
836}
837
838/**
839 * @brief Gets the heir of the processor and makes it executing.
840 *
841 * Must be called with interrupts disabled.  The thread dispatch necessary
842 * indicator is cleared as a side-effect.
843 *
844 * @return The heir thread.
845 *
846 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
847 * _Thread_Dispatch_update_heir().
848 */
849RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
850  Per_CPU_Control *cpu_self
851)
852{
853  Thread_Control *heir;
854
855  heir = cpu_self->heir;
856  cpu_self->dispatch_necessary = false;
857  cpu_self->executing = heir;
858
859  return heir;
860}
861
862RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
863  Thread_Control  *the_thread,
864  Per_CPU_Control *cpu
865)
866{
867  Timestamp_Control last;
868  Timestamp_Control ran;
869
870  last = cpu->cpu_usage_timestamp;
871  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
872  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
873  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
874}
875
876#if defined( RTEMS_SMP )
877RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
878  Per_CPU_Control *cpu_self,
879  Per_CPU_Control *cpu_for_heir,
880  Thread_Control  *heir
881)
882{
883  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );
884
885  cpu_for_heir->heir = heir;
886
887  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
888}
889#endif
890
891void _Thread_Get_CPU_time_used(
892  Thread_Control    *the_thread,
893  Timestamp_Control *cpu_time_used
894);
895
896RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
897  Thread_Action_control *action_control
898)
899{
900  _Chain_Initialize_empty( &action_control->Chain );
901}
902
903RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
904  Thread_Action *action
905)
906{
907  _Chain_Set_off_chain( &action->Node );
908}
909
910RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
911  Thread_Control        *the_thread,
912  Thread_Action         *action,
913  Thread_Action_handler  handler
914)
915{
916  Per_CPU_Control *cpu_of_thread;
917
918  _Assert( _Thread_State_is_owner( the_thread ) );
919
920  cpu_of_thread = _Thread_Get_CPU( the_thread );
921
922  action->handler = handler;
923
924  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );
925
926  _Chain_Append_if_is_off_chain_unprotected(
927    &the_thread->Post_switch_actions.Chain,
928    &action->Node
929  );
930}
931
932RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
933  Thread_Life_state life_state
934)
935{
936  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
937}
938
939RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
940  Thread_Life_state life_state
941)
942{
943  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
944}
945
946RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
947  Thread_Life_state life_state
948)
949{
950  return ( life_state
951    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
952}
953
954RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
955  Thread_Life_state life_state
956)
957{
958  return ( life_state
959    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
960}
961
962RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
963  const Thread_Control *the_thread
964)
965{
966  _Assert( _Thread_State_is_owner( the_thread ) );
967  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
968}
969
970RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
971  Thread_Control *the_thread
972)
973{
974#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
975  ++the_thread->resource_count;
976#else
977  (void) the_thread;
978#endif
979}
980
981RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
982  Thread_Control *the_thread
983)
984{
985#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
986  --the_thread->resource_count;
987#else
988  (void) the_thread;
989#endif
990}
991
992#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
993/**
994 * @brief Returns true if the thread owns resources, and false otherwise.
995 *
996 * Resources are accounted with the Thread_Control::resource_count resource
997 * counter.  This counter is used by mutex objects for example.
998 *
999 * @param[in] the_thread The thread.
1000 */
1001RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
1002  const Thread_Control *the_thread
1003)
1004{
1005  return the_thread->resource_count != 0;
1006}
1007#endif
1008
1009#if defined(RTEMS_SMP)
1010RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
1011  Thread_Control  *the_thread,
1012  Per_CPU_Control *cpu
1013)
1014{
1015  _Per_CPU_Acquire( cpu );
1016
1017  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
1018    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
1019    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
1020  }
1021
1022  _Per_CPU_Release( cpu );
1023}
1024#endif
1025
1026RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
1027  const Thread_Control *the_thread
1028)
1029{
1030#if defined(RTEMS_SMP)
1031  return the_thread->Scheduler.home;
1032#else
1033  (void) the_thread;
1034  return &_Scheduler_Table[ 0 ];
1035#endif
1036}
1037
1038RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
1039  const Thread_Control *the_thread
1040)
1041{
1042#if defined(RTEMS_SMP)
1043  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
1044  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
1045    _Chain_First( &the_thread->Scheduler.Wait_nodes )
1046  );
1047#else
1048  return the_thread->Scheduler.nodes;
1049#endif
1050}
1051
1052RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
1053  const Thread_Control *the_thread,
1054  size_t                scheduler_index
1055)
1056{
1057#if defined(RTEMS_SMP)
1058  return (Scheduler_Node *)
1059    ( (uintptr_t) the_thread->Scheduler.nodes
1060      + scheduler_index * _Scheduler_Node_size );
1061#else
1062  _Assert( scheduler_index == 0 );
1063  (void) scheduler_index;
1064  return the_thread->Scheduler.nodes;
1065#endif
1066}
1067
1068#if defined(RTEMS_SMP)
1069RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
1070  Thread_Control   *the_thread,
1071  ISR_lock_Context *lock_context
1072)
1073{
1074  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
1075}
1076
1077RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
1078  Thread_Control   *the_thread,
1079  ISR_lock_Context *lock_context
1080)
1081{
1082  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
1083}
1084
1085#if defined(RTEMS_SMP)
1086void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1087#endif
1088
1089RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
1090  Thread_Control         *the_thread,
1091  Scheduler_Node         *scheduler_node,
1092  Scheduler_Node_request  request
1093)
1094{
1095  ISR_lock_Context       lock_context;
1096  Scheduler_Node_request current_request;
1097
1098  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );
1099
1100  current_request = scheduler_node->Thread.request;
1101
1102  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
1103    _Assert(
1104      request == SCHEDULER_NODE_REQUEST_ADD
1105        || request == SCHEDULER_NODE_REQUEST_REMOVE
1106    );
1107    _Assert( scheduler_node->Thread.next_request == NULL );
1108    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
1109    the_thread->Scheduler.requests = scheduler_node;
1110  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
1111    _Assert(
1112      ( current_request == SCHEDULER_NODE_REQUEST_ADD
1113        && request == SCHEDULER_NODE_REQUEST_REMOVE )
1114      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
1115        && request == SCHEDULER_NODE_REQUEST_ADD )
1116    );
1117    request = SCHEDULER_NODE_REQUEST_NOTHING;
1118  }
1119
1120  scheduler_node->Thread.request = request;
1121
1122  _Thread_Scheduler_release_critical( the_thread, &lock_context );
1123}
1124
1125RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
1126  Thread_Control *the_thread,
1127  Scheduler_Node *scheduler_node
1128)
1129{
1130  _Chain_Append_unprotected(
1131    &the_thread->Scheduler.Wait_nodes,
1132    &scheduler_node->Thread.Wait_node
1133  );
1134  _Thread_Scheduler_add_request(
1135    the_thread,
1136    scheduler_node,
1137    SCHEDULER_NODE_REQUEST_ADD
1138  );
1139}
1140
1141RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
1142  Thread_Control *the_thread,
1143  Scheduler_Node *scheduler_node
1144)
1145{
1146  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
1147  _Thread_Scheduler_add_request(
1148    the_thread,
1149    scheduler_node,
1150    SCHEDULER_NODE_REQUEST_REMOVE
1151  );
1152}
1153#endif
1154
1155/**
1156 * @brief Returns the priority of the thread.
1157 *
1158 * Returns the user API and thread wait information relevant thread priority.
1159 * This includes temporary thread priority adjustments due to locking
1160 * protocols, a job release or the POSIX sporadic server for example.
1161 *
1162 * @return The priority of the thread.
1163 */
1164RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1165  const Thread_Control *the_thread
1166)
1167{
1168  Scheduler_Node *scheduler_node;
1169
1170  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1171  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1172}
1173
1174/**
1175 * @brief Acquires the thread wait default lock inside a critical section
1176 * (interrupts disabled).
1177 *
1178 * @param[in] the_thread The thread.
1179 * @param[in] lock_context The lock context used for the corresponding lock
1180 *   release.
1181 *
1182 * @see _Thread_Wait_release_default_critical().
1183 */
1184RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
1185  Thread_Control   *the_thread,
1186  ISR_lock_Context *lock_context
1187)
1188{
1189  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
1190}
1191
1192/**
1193 * @brief Acquires the thread wait default lock and returns the executing
1194 * thread.
1195 *
1196 * @param[in] lock_context The lock context used for the corresponding lock
1197 *   release.
1198 *
1199 * @return The executing thread.
1200 *
1201 * @see _Thread_Wait_release_default().
1202 */
1203RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
1204  ISR_lock_Context *lock_context
1205)
1206{
1207  Thread_Control *executing;
1208
1209  _ISR_lock_ISR_disable( lock_context );
1210  executing = _Thread_Executing;
1211  _Thread_Wait_acquire_default_critical( executing, lock_context );
1212
1213  return executing;
1214}
1215
1216/**
1217 * @brief Acquires the thread wait default lock and disables interrupts.
1218 *
1219 * @param[in] the_thread The thread.
1220 * @param[in] lock_context The lock context used for the corresponding lock
1221 *   release.
1222 *
1223 * @see _Thread_Wait_release_default().
1224 */
1225RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
1226  Thread_Control   *the_thread,
1227  ISR_lock_Context *lock_context
1228)
1229{
1230  _ISR_lock_ISR_disable( lock_context );
1231  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
1232}
1233
1234/**
1235 * @brief Releases the thread wait default lock inside a critical section
1236 * (interrupts disabled).
1237 *
1238 * The previous interrupt status is not restored.
1239 *
1240 * @param[in] the_thread The thread.
1241 * @param[in] lock_context The lock context used for the corresponding lock
1242 *   acquire.
1243 */
1244RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
1245  Thread_Control   *the_thread,
1246  ISR_lock_Context *lock_context
1247)
1248{
1249  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1250}
1251
1252/**
1253 * @brief Releases the thread wait default lock and restores the previous
1254 * interrupt status.
1255 *
1256 * @param[in] the_thread The thread.
1257 * @param[in] lock_context The lock context used for the corresponding lock
1258 *   acquire.
1259 */
1260RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
1261  Thread_Control   *the_thread,
1262  ISR_lock_Context *lock_context
1263)
1264{
1265  _Thread_Wait_release_default_critical( the_thread, lock_context );
1266  _ISR_lock_ISR_enable( lock_context );
1267}
1268
1269#if defined(RTEMS_SMP)
1270#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1271  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1272
1273RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1274  Thread_Control            *the_thread,
1275  Thread_queue_Lock_context *queue_lock_context
1276)
1277{
1278  Chain_Node *first;
1279
1280  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1281  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1282
1283  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1284    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1285  }
1286}
1287
1288RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
1289  Thread_queue_Queue        *queue,
1290  Thread_queue_Lock_context *queue_lock_context
1291)
1292{
1293  _Thread_queue_Queue_acquire_critical(
1294    queue,
1295    &_Thread_Executing->Potpourri_stats,
1296    &queue_lock_context->Lock_context
1297  );
1298}
1299
1300RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
1301  Thread_queue_Queue        *queue,
1302  Thread_queue_Lock_context *queue_lock_context
1303)
1304{
1305  _Thread_queue_Queue_release_critical(
1306    queue,
1307    &queue_lock_context->Lock_context
1308  );
1309}
1310#endif
1311
1312/**
1313 * @brief Acquires the thread wait lock inside a critical section (interrupts
1314 * disabled).
1315 *
1316 * @param[in] the_thread The thread.
1317 * @param[in] queue_context The thread queue context for the corresponding
1318 *   _Thread_Wait_release_critical().
1319 */
1320RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
1321  Thread_Control       *the_thread,
1322  Thread_queue_Context *queue_context
1323)
1324{
1325#if defined(RTEMS_SMP)
1326  Thread_queue_Queue *queue;
1327
1328  _Thread_Wait_acquire_default_critical(
1329    the_thread,
1330    &queue_context->Lock_context.Lock_context
1331  );
1332
1333  queue = the_thread->Wait.queue;
1334  queue_context->Lock_context.Wait.queue = queue;
1335
1336  if ( queue != NULL ) {
1337    _Thread_queue_Gate_add(
1338      &the_thread->Wait.Lock.Pending_requests,
1339      &queue_context->Lock_context.Wait.Gate
1340    );
1341    _Thread_Wait_release_default_critical(
1342      the_thread,
1343      &queue_context->Lock_context.Lock_context
1344    );
1345    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1346
1347    if ( queue_context->Lock_context.Wait.queue == NULL ) {
1348      _Thread_Wait_release_queue_critical(
1349        queue,
1350        &queue_context->Lock_context
1351      );
1352      _Thread_Wait_acquire_default_critical(
1353        the_thread,
1354        &queue_context->Lock_context.Lock_context
1355      );
1356      _Thread_Wait_remove_request_locked(
1357        the_thread,
1358        &queue_context->Lock_context
1359      );
1360      _Assert( the_thread->Wait.queue == NULL );
1361    }
1362  }
1363#else
1364  (void) the_thread;
1365  (void) queue_context;
1366#endif
1367}
1368
1369/**
1370 * @brief Acquires the thread wait default lock and disables interrupts.
1371 *
1372 * @param[in] the_thread The thread.
1373 * @param[in] queue_context The thread queue context for the corresponding
1374 *   _Thread_Wait_release().
1375 */
1376RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
1377  Thread_Control       *the_thread,
1378  Thread_queue_Context *queue_context
1379)
1380{
1381  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
1382  _Thread_Wait_acquire_critical( the_thread, queue_context );
1383}
1384
1385/**
1386 * @brief Releases the thread wait lock inside a critical section (interrupts
1387 * disabled).
1388 *
1389 * The previous interrupt status is not restored.
1390 *
1391 * @param[in] the_thread The thread.
1392 * @param[in] queue_context The thread queue context used for corresponding
1393 *   _Thread_Wait_acquire_critical().
1394 */
1395RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
1396  Thread_Control       *the_thread,
1397  Thread_queue_Context *queue_context
1398)
1399{
1400#if defined(RTEMS_SMP)
1401  Thread_queue_Queue *queue;
1402
1403  queue = queue_context->Lock_context.Wait.queue;
1404
1405  if ( queue != NULL ) {
1406    _Thread_Wait_release_queue_critical(
1407      queue, &queue_context->Lock_context
1408    );
1409    _Thread_Wait_acquire_default_critical(
1410      the_thread,
1411      &queue_context->Lock_context.Lock_context
1412    );
1413    _Thread_Wait_remove_request_locked(
1414      the_thread,
1415      &queue_context->Lock_context
1416    );
1417  }
1418
1419  _Thread_Wait_release_default_critical(
1420    the_thread,
1421    &queue_context->Lock_context.Lock_context
1422  );
1423#else
1424  (void) the_thread;
1425  (void) queue_context;
1426#endif
1427}
1428
1429/**
1430 * @brief Releases the thread wait lock and restores the previous interrupt
1431 * status.
1432 *
1433 * @param[in] the_thread The thread.
1434 * @param[in] queue_context The thread queue context used for corresponding
1435 *   _Thread_Wait_acquire().
1436 */
1437RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
1438  Thread_Control       *the_thread,
1439  Thread_queue_Context *queue_context
1440)
1441{
1442  _Thread_Wait_release_critical( the_thread, queue_context );
1443  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
1444}
1445
1446/**
1447 * @brief Claims the thread wait queue.
1448 *
1449 * The caller must not be the owner of the default thread wait lock.  The
1450 * caller must be the owner of the corresponding thread queue lock.  The
1451 * registration of the corresponding thread queue operations is deferred and
1452 * done after the deadlock detection.  This is crucial to support timeouts on
1453 * SMP configurations.
1454 *
1455 * @param[in] the_thread The thread.
1456 * @param[in] queue The new thread queue.
1457 *
1458 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
1459 */
1460RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
1461  Thread_Control     *the_thread,
1462  Thread_queue_Queue *queue
1463)
1464{
1465  ISR_lock_Context lock_context;
1466
1467  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1468
1469  _Assert( the_thread->Wait.queue == NULL );
1470
1471#if defined(RTEMS_SMP)
1472  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
1473  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
1474  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
1475#endif
1476
1477  the_thread->Wait.queue = queue;
1478
1479  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1480}
1481
1482/**
1483 * @brief Finalizes the thread wait queue claim via registration of the
1484 * corresponding thread queue operations.
1485 *
1486 * @param[in] the_thread The thread.
1487 * @param[in] operations The corresponding thread queue operations.
1488 */
1489RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
1490  Thread_Control                *the_thread,
1491  const Thread_queue_Operations *operations
1492)
1493{
1494  the_thread->Wait.operations = operations;
1495}
1496
1497/**
1498 * @brief Removes a thread wait lock request.
1499 *
1500 * On SMP configurations, removes a thread wait lock request.
1501 *
1502 * On other configurations, this function does nothing.
1503 *
1504 * @param[in] the_thread The thread.
1505 * @param[in] queue_lock_context The thread queue lock context used for
1506 *   corresponding _Thread_Wait_acquire().
1507 */
1508RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
1509  Thread_Control            *the_thread,
1510  Thread_queue_Lock_context *queue_lock_context
1511)
1512{
1513#if defined(RTEMS_SMP)
1514  ISR_lock_Context lock_context;
1515
1516  _Thread_Wait_acquire_default( the_thread, &lock_context );
1517  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
1518  _Thread_Wait_release_default( the_thread, &lock_context );
1519#else
1520  (void) the_thread;
1521  (void) queue_lock_context;
1522#endif
1523}
1524
1525/**
1526 * @brief Restores the default thread wait queue and operations.
1527 *
1528 * The caller must be the owner of the current thread wait queue lock.
1529 *
1530 * On SMP configurations, the pending requests are updated to use the stale
1531 * thread queue operations.
1532 *
1533 * @param[in] the_thread The thread.
1534 *
1535 * @see _Thread_Wait_claim().
1536 */
1537RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
1538  Thread_Control *the_thread
1539)
1540{
1541#if defined(RTEMS_SMP)
1542  ISR_lock_Context  lock_context;
1543  Chain_Node       *node;
1544  const Chain_Node *tail;
1545
1546  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1547
1548  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1549  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
1550
1551  if ( node != tail ) {
1552    do {
1553      Thread_queue_Context *queue_context;
1554
1555      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
1556      queue_context->Lock_context.Wait.queue = NULL;
1557
1558      node = _Chain_Next( node );
1559    } while ( node != tail );
1560
1561    _Thread_queue_Gate_add(
1562      &the_thread->Wait.Lock.Pending_requests,
1563      &the_thread->Wait.Lock.Tranquilizer
1564    );
1565  } else {
1566    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
1567  }
1568#endif
1569
1570  the_thread->Wait.queue = NULL;
1571  the_thread->Wait.operations = &_Thread_queue_Operations_default;
1572
1573#if defined(RTEMS_SMP)
1574  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1575#endif
1576}
1577
1578/**
1579 * @brief Tranquilizes the thread after a wait on a thread queue.
1580 *
1581 * After the violent blocking procedure this function makes the thread calm and
1582 * peaceful again so that it can carry out its normal work.
1583 *
1584 * On SMP configurations, ensures that all pending thread wait lock requests
1585 * completed before the thread is able to begin a new thread wait procedure.
1586 *
1587 * On other configurations, this function does nothing.
1588 *
1589 * It must be called after a _Thread_Wait_claim() exactly once
1590 *  - after the corresponding thread queue lock was released, and
1591 *  - the default wait state is restored or some other processor is about to do
1592 *    this.
1593 *
1594 * @param[in] the_thread The thread.
1595 */
1596RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
1597  Thread_Control *the_thread
1598)
1599{
1600#if defined(RTEMS_SMP)
1601  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
1602#else
1603  (void) the_thread;
1604#endif
1605}
1606
1607/**
1608 * @brief Cancels a thread wait on a thread queue.
1609 *
1610 * @param[in] the_thread The thread.
1611 * @param[in] queue_context The thread queue context used for corresponding
1612 *   _Thread_Wait_acquire().
1613 */
1614RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
1615  Thread_Control       *the_thread,
1616  Thread_queue_Context *queue_context
1617)
1618{
1619  Thread_queue_Queue *queue;
1620
1621  queue = the_thread->Wait.queue;
1622
1623#if defined(RTEMS_SMP)
1624  if ( queue != NULL ) {
1625    _Assert( queue_context->Lock_context.Wait.queue == queue );
1626#endif
1627
1628    ( *the_thread->Wait.operations->extract )(
1629      queue,
1630      the_thread,
1631      queue_context
1632    );
1633    _Thread_Wait_restore_default( the_thread );
1634
1635#if defined(RTEMS_SMP)
1636    _Assert( queue_context->Lock_context.Wait.queue == NULL );
1637    queue_context->Lock_context.Wait.queue = queue;
1638  }
1639#endif
1640}
1641
1642/**
1643 * @brief The initial thread wait flags value set by _Thread_Initialize().
1644 */
1645#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1646
1647/**
1648 * @brief Mask to get the thread wait state flags.
1649 */
1650#define THREAD_WAIT_STATE_MASK 0xffU
1651
1652/**
1653 * @brief Indicates that the thread begins with the blocking operation.
1654 *
1655 * A blocking operation consists of an optional watchdog initialization and the
1656 * setting of the appropriate thread blocking state with the corresponding
1657 * scheduler block operation.
1658 */
1659#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1660
1661/**
1662 * @brief Indicates that the thread completed the blocking operation.
1663 */
1664#define THREAD_WAIT_STATE_BLOCKED 0x2U
1665
1666/**
1667 * @brief Indicates that a condition to end the thread wait occurred.
1668 *
1669 * This could be a timeout, a signal, an event or a resource availability.
1670 */
1671#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1672
1673/**
1674 * @brief Mask to get the thread wait class flags.
1675 */
1676#define THREAD_WAIT_CLASS_MASK 0xff00U
1677
1678/**
1679 * @brief Indicates that the thread waits for an event.
1680 */
1681#define THREAD_WAIT_CLASS_EVENT 0x100U
1682
1683/**
1684 * @brief Indicates that the thread waits for a system event.
1685 */
1686#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1687
1688/**
1689 * @brief Indicates that the thread waits for an object.
1690 */
1691#define THREAD_WAIT_CLASS_OBJECT 0x400U
1692
1693/**
1694 * @brief Indicates that the thread waits for a period.
1695 */
1696#define THREAD_WAIT_CLASS_PERIOD 0x800U
1697
1698RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
1699  Thread_Control    *the_thread,
1700  Thread_Wait_flags  flags
1701)
1702{
1703#if defined(RTEMS_SMP)
1704  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
1705#else
1706  the_thread->Wait.flags = flags;
1707#endif
1708}
1709
1710RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
1711  const Thread_Control *the_thread
1712)
1713{
1714#if defined(RTEMS_SMP)
1715  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
1716#else
1717  return the_thread->Wait.flags;
1718#endif
1719}
1720
1721RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
1722  const Thread_Control *the_thread
1723)
1724{
1725#if defined(RTEMS_SMP)
1726  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
1727#else
1728  return the_thread->Wait.flags;
1729#endif
1730}
1731
1732/**
1733 * @brief Tries to change the thread wait flags with release semantics in case
1734 * of success.
1735 *
1736 * Must be called inside a critical section (interrupts disabled).
1737 *
1738 * In case the wait flags are equal to the expected wait flags, then the wait
1739 * flags are set to the desired wait flags.
1740 *
1741 * @param[in] the_thread The thread.
1742 * @param[in] expected_flags The expected wait flags.
1743 * @param[in] desired_flags The desired wait flags.
1744 *
1745 * @retval true The wait flags were equal to the expected wait flags.
1746 * @retval false Otherwise.
1747 */
1748RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
1749  Thread_Control    *the_thread,
1750  Thread_Wait_flags  expected_flags,
1751  Thread_Wait_flags  desired_flags
1752)
1753{
1754  _Assert( _ISR_Get_level() != 0 );
1755
1756#if defined(RTEMS_SMP)
1757  return _Atomic_Compare_exchange_uint(
1758    &the_thread->Wait.flags,
1759    &expected_flags,
1760    desired_flags,
1761    ATOMIC_ORDER_RELEASE,
1762    ATOMIC_ORDER_RELAXED
1763  );
1764#else
1765  bool success = ( the_thread->Wait.flags == expected_flags );
1766
1767  if ( success ) {
1768    the_thread->Wait.flags = desired_flags;
1769  }
1770
1771  return success;
1772#endif
1773}
1774
1775/**
1776 * @brief Tries to change the thread wait flags with acquire semantics.
1777 *
1778 * In case the wait flags are equal to the expected wait flags, then the wait
1779 * flags are set to the desired wait flags.
1780 *
1781 * @param[in] the_thread The thread.
1782 * @param[in] expected_flags The expected wait flags.
1783 * @param[in] desired_flags The desired wait flags.
1784 *
1785 * @retval true The wait flags were equal to the expected wait flags.
1786 * @retval false Otherwise.
1787 */
1788RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
1789  Thread_Control    *the_thread,
1790  Thread_Wait_flags  expected_flags,
1791  Thread_Wait_flags  desired_flags
1792)
1793{
1794  bool success;
1795#if defined(RTEMS_SMP)
1796  return _Atomic_Compare_exchange_uint(
1797    &the_thread->Wait.flags,
1798    &expected_flags,
1799    desired_flags,
1800    ATOMIC_ORDER_ACQUIRE,
1801    ATOMIC_ORDER_ACQUIRE
1802  );
1803#else
1804  ISR_Level level;
1805
1806  _ISR_Local_disable( level );
1807
1808  success = _Thread_Wait_flags_try_change_release(
1809    the_thread,
1810    expected_flags,
1811    desired_flags
1812  );
1813
1814  _ISR_Local_enable( level );
1815#endif
1816
1817  return success;
1818}
1819
1820/**
1821 * @brief Returns the object identifier of the object containing the current
1822 * thread wait queue.
1823 *
1824 * This function may be used for debug and system information purposes.  The
1825 * caller must be the owner of the thread lock.
1826 *
1827 * @retval 0 The thread waits on no thread queue currently, the thread wait
1828 *   queue is not contained in an object, or the current thread state provides
1829 *   insufficient information, e.g. the thread is in the middle of a blocking
1830 *   operation.
1831 * @retval other The object identifier of the object containing the thread wait
1832 *   queue.
1833 */
1834Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1835
1836RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
1837  const Thread_Control *the_thread
1838)
1839{
1840  return (Status_Control) the_thread->Wait.return_code;
1841}
1842
1843/**
1844 * @brief General purpose thread wait timeout.
1845 *
1846 * @param[in] watchdog The thread timer watchdog.
1847 */
1848void _Thread_Timeout( Watchdog_Control *watchdog );
1849
1850RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
1851  Thread_Timer_information *timer,
1852  Per_CPU_Control          *cpu
1853)
1854{
1855  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
1856  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ];
1857  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
1858}
1859
1860RTEMS_INLINE_ROUTINE void _Thread_Add_timeout_ticks(
1861  Thread_Control    *the_thread,
1862  Per_CPU_Control   *cpu,
1863  Watchdog_Interval  ticks
1864)
1865{
1866  ISR_lock_Context lock_context;
1867
1868  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
1869
1870  the_thread->Timer.header =
1871    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ];
1872  the_thread->Timer.Watchdog.routine = _Thread_Timeout;
1873  _Watchdog_Per_CPU_insert_ticks( &the_thread->Timer.Watchdog, cpu, ticks );
1874
1875  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
1876}
1877
1878RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_realtime(
1879  Thread_Control                 *the_thread,
1880  Per_CPU_Control                *cpu,
1881  Watchdog_Service_routine_entry  routine,
1882  uint64_t                        expire
1883)
1884{
1885  ISR_lock_Context lock_context;
1886
1887  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
1888
1889  the_thread->Timer.header =
1890    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
1891  the_thread->Timer.Watchdog.routine = routine;
1892  _Watchdog_Per_CPU_insert_realtime( &the_thread->Timer.Watchdog, cpu, expire );
1893
1894  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
1895}
1896
1897RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
1898{
1899  ISR_lock_Context lock_context;
1900
1901  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
1902
1903  _Watchdog_Per_CPU_remove(
1904    &the_thread->Timer.Watchdog,
1905#if defined(RTEMS_SMP)
1906    the_thread->Timer.Watchdog.cpu,
1907#else
1908    _Per_CPU_Get(),
1909#endif
1910    the_thread->Timer.header
1911  );
1912
1913  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
1914}
1915
1916RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
1917  Thread_Control     *the_thread,
1918  Thread_queue_Queue *queue
1919)
1920{
1921  _Thread_Wait_tranquilize( the_thread );
1922  _Thread_Timer_remove( the_thread );
1923
1924#if defined(RTEMS_MULTIPROCESSING)
1925  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
1926    _Thread_Unblock( the_thread );
1927  } else {
1928    _Thread_queue_Unblock_proxy( queue, the_thread );
1929  }
1930#else
1931  (void) queue;
1932  _Thread_Unblock( the_thread );
1933#endif
1934}
1935
1936Status_Control _Thread_Set_name(
1937  Thread_Control *the_thread,
1938  const char     *name
1939);
1940
1941size_t _Thread_Get_name(
1942  const Thread_Control *the_thread,
1943  char                 *buffer,
1944  size_t                buffer_size
1945);
1946
1947/** @}*/
1948
1949#ifdef __cplusplus
1950}
1951#endif
1952
1953#if defined(RTEMS_MULTIPROCESSING)
1954#include <rtems/score/threadmp.h>
1955#endif
1956
1957#endif
1958/* end of include file */
Note: See TracBrowser for help on using the repository browser.