source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ bbfbfc8

5
Last change on this file since bbfbfc8 was bbfbfc8, checked in by Sebastian Huber <sebastian.huber@…>, on 02/03/17 at 07:57:18

score: Move _Thread_Scheduler_ask_for_help()

Move _Thread_Scheduler_ask_for_help(), rename it to
_Thread_Ask_for_help() and make it static.

  • Property mode set to 100644
File size: 51.5 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2017 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/threadqimpl.h>
35#include <rtems/score/todimpl.h>
36#include <rtems/score/freechain.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44/**
45 * @addtogroup ScoreThread
46 */
47/**@{**/
48
49/**
50 *  The following constant defines the status code which indicates
51 *  that a proxy is blocking.
52 */
53#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
54
55/**
56 *  Self for the GNU Ada Run-Time
57 */
58extern void *rtems_ada_self;
59
60typedef struct {
61  Objects_Information Objects;
62
63  Freechain_Control Free_thread_queue_heads;
64} Thread_Information;
65
66/**
67 *  The following defines the information control block used to
68 *  manage this class of objects.
69 */
70extern Thread_Information _Thread_Internal_information;
71
72/**
73 *  The following points to the thread whose floating point
74 *  context is currently loaded.
75 */
76#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
77extern Thread_Control *_Thread_Allocated_fp;
78#endif
79
80#if defined(RTEMS_SMP)
81#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
82  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
83#endif
84
85typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
86
87void _Thread_Iterate(
88  Thread_Visitor  visitor,
89  void           *arg
90);
91
92void _Thread_Initialize_information(
93  Thread_Information  *information,
94  Objects_APIs         the_api,
95  uint16_t             the_class,
96  uint32_t             maximum,
97  bool                 is_string,
98  uint32_t             maximum_name_length
99);
100
101/**
102 *  @brief Initialize thread handler.
103 *
104 *  This routine performs the initialization necessary for this handler.
105 */
106void _Thread_Handler_initialization(void);
107
108/**
109 *  @brief Create idle thread.
110 *
111 *  This routine creates the idle thread.
112 *
113 *  @warning No thread should be created before this one.
114 */
115void _Thread_Create_idle(void);
116
117/**
118 *  @brief Start thread multitasking.
119 *
120 *  This routine initiates multitasking.  It is invoked only as
121 *  part of initialization and its invocation is the last act of
122 *  the non-multitasking part of the system initialization.
123 */
124void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
125
126/**
127 *  @brief Allocate the requested stack space for the thread.
128 *
129 *  Allocate the requested stack space for the thread.
130 *  Set the Start.stack field to the address of the stack.
131 *
132 *  @param[in] the_thread is the thread where the stack space is requested
133 *  @param[in] stack_size is the stack space is requested
134 *
135 *  @retval actual size allocated after any adjustment
136 *  @retval zero if the allocation failed
137 */
138size_t _Thread_Stack_Allocate(
139  Thread_Control *the_thread,
140  size_t          stack_size
141);
142
143/**
144 *  @brief Deallocate thread stack.
145 *
146 *  Deallocate the Thread's stack.
147 */
148void _Thread_Stack_Free(
149  Thread_Control *the_thread
150);
151
152/**
153 *  @brief Initialize thread.
154 *
155 *  This routine initializes the specified the thread.  It allocates
156 *  all memory associated with this thread.  It completes by adding
157 *  the thread to the local object table so operations on this
158 *  thread id are allowed.
159 *
160 *  @note If stack_area is NULL, it is allocated from the workspace.
161 *
162 *  @note If the stack is allocated from the workspace, then it is
163 *        guaranteed to be of at least minimum size.
164 */
165bool _Thread_Initialize(
166  Thread_Information                   *information,
167  Thread_Control                       *the_thread,
168  const struct Scheduler_Control       *scheduler,
169  void                                 *stack_area,
170  size_t                                stack_size,
171  bool                                  is_fp,
172  Priority_Control                      priority,
173  bool                                  is_preemptible,
174  Thread_CPU_budget_algorithms          budget_algorithm,
175  Thread_CPU_budget_algorithm_callout   budget_callout,
176  uint32_t                              isr_level,
177  Objects_Name                          name
178);
179
180/**
181 *  @brief Initializes thread and executes it.
182 *
183 *  This routine initializes the executable information for a thread
184 *  and makes it ready to execute.  After this routine executes, the
185 *  thread competes with all other threads for CPU time.
186 *
187 *  @param the_thread The thread to be started.
188 *  @param entry The thread entry information.
189 */
190bool _Thread_Start(
191  Thread_Control                 *the_thread,
192  const Thread_Entry_information *entry,
193  ISR_lock_Context               *lock_context
194);
195
196void _Thread_Restart_self(
197  Thread_Control                 *executing,
198  const Thread_Entry_information *entry,
199  ISR_lock_Context               *lock_context
200) RTEMS_NO_RETURN;
201
202bool _Thread_Restart_other(
203  Thread_Control                 *the_thread,
204  const Thread_Entry_information *entry,
205  ISR_lock_Context               *lock_context
206);
207
208void _Thread_Yield( Thread_Control *executing );
209
210Thread_Life_state _Thread_Change_life(
211  Thread_Life_state clear,
212  Thread_Life_state set,
213  Thread_Life_state ignore
214);
215
216Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
217
218/**
219 * @brief Kills all zombie threads in the system.
220 *
221 * Threads change into the zombie state as the last step in the thread
222 * termination sequence right before a context switch to the heir thread is
223 * initiated.  Since the thread stack is still in use during this phase we have
224 * to postpone the thread stack reclamation until this point.  On SMP
225 * configurations we may have to busy wait for context switch completion here.
226 */
227void _Thread_Kill_zombies( void );
228
229void _Thread_Exit(
230  Thread_Control    *executing,
231  Thread_Life_state  set,
232  void              *exit_value
233);
234
235void _Thread_Join(
236  Thread_Control       *the_thread,
237  States_Control        waiting_for_join,
238  Thread_Control       *executing,
239  Thread_queue_Context *queue_context
240);
241
242void _Thread_Cancel(
243  Thread_Control *the_thread,
244  Thread_Control *executing,
245  void           *exit_value
246);
247
248typedef struct {
249  Thread_queue_Context  Base;
250  Thread_Control       *cancel;
251} Thread_Close_context;
252
253/**
254 * @brief Closes the thread.
255 *
256 * Closes the thread object and starts the thread termination sequence.  In
257 * case the executing thread is not terminated, then this function waits until
258 * the terminating thread reached the zombie state.
259 */
260void _Thread_Close(
261  Thread_Control       *the_thread,
262  Thread_Control       *executing,
263  Thread_Close_context *context
264);
265
266RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
267{
268  return _States_Is_ready( the_thread->current_state );
269}
270
271States_Control _Thread_Clear_state_locked(
272  Thread_Control *the_thread,
273  States_Control  state
274);
275
276/**
277 * @brief Clears the specified thread state.
278 *
279 * In case the previous state is a non-ready state and the next state is the
280 * ready state, then the thread is unblocked by the scheduler.
281 *
282 * @param[in] the_thread The thread.
283 * @param[in] state The state to clear.  It must not be zero.
284 *
285 * @return The previous state.
286 */
287States_Control _Thread_Clear_state(
288  Thread_Control *the_thread,
289  States_Control  state
290);
291
292States_Control _Thread_Set_state_locked(
293  Thread_Control *the_thread,
294  States_Control  state
295);
296
297/**
298 * @brief Sets the specified thread state.
299 *
300 * In case the previous state is the ready state, then the thread is blocked by
301 * the scheduler.
302 *
303 * @param[in] the_thread The thread.
304 * @param[in] state The state to set.  It must not be zero.
305 *
306 * @return The previous state.
307 */
308States_Control _Thread_Set_state(
309  Thread_Control *the_thread,
310  States_Control  state
311);
312
313/**
314 *  @brief Initializes the environment for a thread.
315 *
316 *  This routine initializes the context of @a the_thread to its
317 *  appropriate starting state.
318 *
319 *  @param[in] the_thread is the pointer to the thread control block.
320 */
321void _Thread_Load_environment(
322  Thread_Control *the_thread
323);
324
325void _Thread_Entry_adaptor_idle( Thread_Control *executing );
326
327void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
328
329void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
330
331/**
332 *  @brief Wrapper function for all threads.
333 *
334 *  This routine is the wrapper function for all threads.  It is
335 *  the starting point for all threads.  The user provided thread
336 *  entry point is invoked by this routine.  Operations
337 *  which must be performed immediately before and after the user's
338 *  thread executes are found here.
339 *
340 *  @note On entry, it is assumed all interrupts are blocked and that this
341 *  routine needs to set the initial isr level.  This may or may not
342 *  actually be needed by the context switch routine and as a result
343 *  interrupts may already be at their proper level.  Either way,
344 *  setting the initial isr level properly here is safe.
345 */
346void _Thread_Handler( void );
347
348/**
349 * @brief Executes the global constructors and then restarts itself as the
350 * first initialization thread.
351 *
352 * The first initialization thread is the first RTEMS initialization task or
353 * the first POSIX initialization thread in case no RTEMS initialization tasks
354 * are present.
355 */
356void _Thread_Global_construction(
357  Thread_Control                 *executing,
358  const Thread_Entry_information *entry
359) RTEMS_NO_RETURN;
360
361/**
362 *  @brief Ends the delay of a thread.
363 *
364 *  This routine is invoked when a thread must be unblocked at the
365 *  end of a time based delay (i.e. wake after or wake when).
366 *  It is called by the watchdog handler.
367 *
368 *  @param[in] id is the thread id
369 *  @param[in] ignored is not used
370 */
371void _Thread_Delay_ended(
372  Objects_Id  id,
373  void       *ignored
374);
375
/**
 * @brief Acquires the thread state lock with interrupts already disabled.
 *
 * The thread state is protected by the lock of the thread's Join_queue.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
}
383
/**
 * @brief Disables interrupts and acquires the thread state lock.
 *
 * @see _Thread_State_release().
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_State_acquire_critical( the_thread, lock_context );
}
392
/**
 * @brief Disables interrupts, acquires the state lock of the executing
 * thread, and returns the executing thread.
 *
 * The executing thread is sampled after interrupts are disabled so that it
 * cannot change underneath us on this processor.
 *
 * @return The executing thread with its state lock held.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_State_acquire_critical( executing, lock_context );

  return executing;
}
405
/**
 * @brief Releases the thread state lock without enabling interrupts.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
}
413
/**
 * @brief Releases the thread state lock and restores the interrupt state.
 *
 * @see _Thread_State_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_State_release_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
422
#if defined(RTEMS_DEBUG)
/**
 * @brief Returns true if the caller owns the thread state lock, and false
 * otherwise.
 *
 * Only available in debug configurations; used by assertions.
 */
RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
  const Thread_Control *the_thread
)
{
  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
}
#endif
431
432/**
433 * @brief Performs the priority actions specified by the thread queue context
434 * along the thread queue path.
435 *
436 * The caller must be the owner of the thread wait lock.
437 *
438 * @param start_of_path The start thread of the thread queue path.
439 * @param queue_context The thread queue context specifying the thread queue
440 *   path and initial thread priority actions.
441 *
442 * @see _Thread_queue_Path_acquire_critical().
443 */
444void _Thread_Priority_perform_actions(
445  Thread_Control       *start_of_path,
446  Thread_queue_Context *queue_context
447);
448
449/**
450 * @brief Adds the specified thread priority node to the corresponding thread
451 * priority aggregation.
452 *
453 * The caller must be the owner of the thread wait lock.
454 *
455 * @param the_thread The thread.
456 * @param priority_node The thread priority node to add.
457 * @param queue_context The thread queue context to return an updated set of
458 *   threads for _Thread_Priority_update().  The thread queue context must be
459 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
460 *   call of this function.
461 *
462 * @see _Thread_Wait_acquire().
463 */
464void _Thread_Priority_add(
465  Thread_Control       *the_thread,
466  Priority_Node        *priority_node,
467  Thread_queue_Context *queue_context
468);
469
470/**
471 * @brief Removes the specified thread priority node from the corresponding
472 * thread priority aggregation.
473 *
474 * The caller must be the owner of the thread wait lock.
475 *
476 * @param the_thread The thread.
477 * @param priority_node The thread priority node to remove.
478 * @param queue_context The thread queue context to return an updated set of
479 *   threads for _Thread_Priority_update().  The thread queue context must be
480 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
481 *   call of this function.
482 *
483 * @see _Thread_Wait_acquire().
484 */
485void _Thread_Priority_remove(
486  Thread_Control       *the_thread,
487  Priority_Node        *priority_node,
488  Thread_queue_Context *queue_context
489);
490
491/**
492 * @brief Propagates a thread priority value change in the specified thread
493 * priority node to the corresponding thread priority aggregation.
494 *
495 * The caller must be the owner of the thread wait lock.
496 *
497 * @param the_thread The thread.
498 * @param priority_node The thread priority node to change.
499 * @param prepend_it In case this is true, then the thread is prepended to
500 *   its priority group in its home scheduler instance, otherwise it is
501 *   appended.
502 * @param queue_context The thread queue context to return an updated set of
503 *   threads for _Thread_Priority_update().  The thread queue context must be
504 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
505 *   call of this function.
506 *
507 * @see _Thread_Wait_acquire().
508 */
509void _Thread_Priority_changed(
510  Thread_Control       *the_thread,
511  Priority_Node        *priority_node,
512  bool                  prepend_it,
513  Thread_queue_Context *queue_context
514);
515
516/**
517 * @brief Changes the thread priority value of the specified thread priority
518 * node in the corresponding thread priority aggregation.
519 *
520 * The caller must be the owner of the thread wait lock.
521 *
522 * @param the_thread The thread.
523 * @param priority_node The thread priority node to change.
524 * @param new_priority The new thread priority value of the thread priority
525 *   node to change.
526 * @param prepend_it In case this is true, then the thread is prepended to
527 *   its priority group in its home scheduler instance, otherwise it is
528 *   appended.
529 * @param queue_context The thread queue context to return an updated set of
530 *   threads for _Thread_Priority_update().  The thread queue context must be
531 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
532 *   call of this function.
533 *
534 * @see _Thread_Wait_acquire().
535 */
RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Priority_Control      new_priority,
  bool                  prepend_it,
  Thread_queue_Context *queue_context
)
{
  /*
   * The node's priority value must be updated before the change is
   * propagated through the priority aggregations.
   */
  _Priority_Node_set_priority( priority_node, new_priority );
  _Thread_Priority_changed(
    the_thread,
    priority_node,
    prepend_it,
    queue_context
  );
}
552
553/**
554 * @brief Replaces the victim priority node with the replacement priority node
555 * in the corresponding thread priority aggregation.
556 *
557 * The caller must be the owner of the thread wait lock.
558 *
559 * @param the_thread The thread.
560 * @param victim_node The victim thread priority node.
561 * @param replacement_node The replacement thread priority node.
562 *
563 * @see _Thread_Wait_acquire().
564 */
565void _Thread_Priority_replace(
566  Thread_Control *the_thread,
567  Priority_Node  *victim_node,
568  Priority_Node  *replacement_node
569);
570
571/**
572 * @brief Adds a priority node to the corresponding thread priority
573 * aggregation.
574 *
575 * The caller must be the owner of the thread wait lock.
576 *
577 * @param the_thread The thread.
578 * @param priority_node The thread priority node to add.
579 * @param queue_context The thread queue context to return an updated set of
580 *   threads for _Thread_Priority_update().  The thread queue context must be
581 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
582 *   call of this function.
583 *
584 * @see _Thread_Priority_add(), _Thread_Priority_change(),
585 *   _Thread_Priority_changed() and _Thread_Priority_remove().
586 */
587void _Thread_Priority_update( Thread_queue_Context *queue_context );
588
589#if defined(RTEMS_SMP)
590void _Thread_Priority_and_sticky_update(
591  Thread_Control *the_thread,
592  int             sticky_level_change
593);
594#endif
595
596/**
597 * @brief Returns true if the left thread priority is less than the right
598 * thread priority in the intuitive sense of priority and false otherwise.
599 */
600RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
601  Priority_Control left,
602  Priority_Control right
603)
604{
605  return left > right;
606}
607
608/**
609 * @brief Returns the highest priority of the left and right thread priorities
610 * in the intuitive sense of priority.
611 */
612RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
613  Priority_Control left,
614  Priority_Control right
615)
616{
617  return _Thread_Priority_less_than( left, right ) ? right : left;
618}
619
620RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
621  Objects_Id id
622)
623{
624  uint32_t the_api;
625
626  the_api = _Objects_Get_API( id );
627
628  if ( !_Objects_Is_api_valid( the_api ) ) {
629    return NULL;
630  }
631
632  /*
633   * Threads are always first class :)
634   *
635   * There is no need to validate the object class of the object identifier,
636   * since this will be done by the object get methods.
637   */
638  return _Objects_Information_table[ the_api ][ 1 ];
639}
640
641/**
642 * @brief Gets a thread by its identifier.
643 *
644 * @see _Objects_Get().
645 */
646Thread_Control *_Thread_Get(
647  Objects_Id         id,
648  ISR_lock_Context  *lock_context
649);
650
/**
 * @brief Returns the processor assigned to the thread.
 *
 * On uni-processor configurations this is always the current processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}
663
/**
 * @brief Assigns the processor to the thread.
 *
 * This is a no-op on uni-processor configurations.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
676
677/**
678 * This function returns true if the_thread is the currently executing
679 * thread, and false otherwise.
680 */
681
682RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
683  const Thread_Control *the_thread
684)
685{
686  return ( the_thread == _Thread_Executing );
687}
688
#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  /* The is-executing indicator lives in the thread's register context. */
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif
704
705/**
706 * This function returns true if the_thread is the heir
707 * thread, and false otherwise.
708 */
709
710RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
711  const Thread_Control *the_thread
712)
713{
714  return ( the_thread == _Thread_Heir );
715}
716
/**
 * @brief Clears any blocking state of the thread.
 *
 * This routine clears any blocking state for the_thread.  It performs
 * any necessary scheduling operations including the selection of
 * a new heir thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}
729
/**
 * @brief Returns true if the floating point context of the thread is
 * currently loaded in the floating point unit, and false otherwise.
 *
 * Only available when the CPU supports hardware or software floating point.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif
744
745/*
746 *  If the CPU has hardware floating point, then we must address saving
747 *  and restoring it as part of the context switch.
748 *
749 *  The second conditional compilation section selects the algorithm used
750 *  to context switch between floating point tasks.  The deferred algorithm
751 *  can be significantly better in a system with few floating point tasks
752 *  because it reduces the total number of save and restore FP context
753 *  operations.  However, this algorithm can not be used on all CPUs due
754 *  to unpredictable use of FP registers by some compilers for integer
755 *  operations.
756 */
757
/**
 * @brief Saves the floating point context of the executing thread, if any.
 *
 * With the deferred FP switch algorithm the save is skipped here; the
 * context is saved lazily in _Thread_Restore_fp() when another FP thread
 * needs the unit.
 */
RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  /* A NULL fp_context marks a thread which does not use the FP unit. */
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}
767
/**
 * @brief Restores the floating point context of the executing thread, if any.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  /*
   * Deferred FP switch: only act if this thread uses the FP unit and does
   * not already own it.  Save the current owner's context first, then
   * restore this thread's context and claim ownership.
   */
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  /* Immediate FP switch: restore unconditionally for FP threads. */
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}
785
/**
 * @brief Releases ownership of the floating point unit.
 *
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
797
/**
 * @brief Returns true if a thread dispatch is necessary, and false
 * otherwise.
 *
 * NOTE(review): the previous comment claimed this returns true if
 * dispatching is disabled, which did not match the code — the function
 * simply reports the thread dispatch necessary indicator.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
807
808/**
809 * This function returns true if the_thread is NULL and false otherwise.
810 */
811
812RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
813  const Thread_Control *the_thread
814)
815{
816  return ( the_thread == NULL );
817}
818
/**
 * @brief Returns true if the status code indicates that a proxy is
 * blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}
830
/**
 * @brief Returns the maximum count of internal threads.
 *
 * This is one idle thread per configured processor, plus the MPCI receive
 * thread in multiprocessing configurations.
 */
RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
{
  /* Idle threads */
  uint32_t maximum_internal_threads =
    rtems_configuration_get_maximum_processors();

  /* MPCI thread */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    ++maximum_internal_threads;
  }
#endif

  return maximum_internal_threads;
}
846
/**
 * @brief Allocates an internal thread object.
 *
 * The caller must provide the object allocator protection.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}
852
/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * Must be called with interrupts disabled.  The thread dispatch necessary
 * indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Thread_Dispatch_update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  heir = cpu_self->heir;

  /* Clear the indicator before the heir becomes the executing thread. */
  cpu_self->dispatch_necessary = false;
  cpu_self->executing = heir;

  return heir;
}
876
/**
 * @brief Charges the CPU time elapsed since the last usage update of the
 * processor to the specified thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  /* ran = new uptime - previous usage timestamp of this processor */
  last = cpu->cpu_usage_timestamp;
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}
890
#if defined( RTEMS_SMP )
/**
 * @brief Installs a new heir thread on the specified processor and requests
 * a thread dispatch for it.
 *
 * The CPU time used of the previous heir is updated before it is replaced.
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
}
#endif
905
906void _Thread_Get_CPU_time_used(
907  Thread_Control    *the_thread,
908  Timestamp_Control *cpu_time_used
909);
910
/**
 * @brief Initializes the thread action control to an empty chain of actions.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}
917
/**
 * @brief Initializes the thread action so that it is marked as not pending.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}
924
/**
 * @brief Adds a post-switch action to the thread and requests a thread
 * dispatch on its processor.
 *
 * The caller must own the thread state lock.  An action already pending on
 * the chain is not appended twice.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;

  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  /* Ensure the processor of the thread carries out a dispatch. */
  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
946
947RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
948  Thread_Life_state life_state
949)
950{
951  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
952}
953
954RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
955  Thread_Life_state life_state
956)
957{
958  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
959}
960
961RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
962  Thread_Life_state life_state
963)
964{
965  return ( life_state
966    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
967}
968
969RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
970  Thread_Life_state life_state
971)
972{
973  return ( life_state
974    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
975}
976
/**
 * @brief Returns true if the thread is joinable (not detached), and false
 * otherwise.
 *
 * The caller must own the thread state lock.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
  const Thread_Control *the_thread
)
{
  _Assert( _Thread_State_is_owner( the_thread ) );
  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
}
984
/**
 * @brief Increments the resource count of the thread.
 *
 * This is a no-op unless resource counting is enabled in the configuration.
 */
RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
  ++the_thread->resource_count;
#else
  (void) the_thread;
#endif
}
995
/**
 * @brief Decrements the resource count of the thread.
 *
 * This is a no-op unless resource counting is enabled in the configuration.
 */
RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
  --the_thread->resource_count;
#else
  (void) the_thread;
#endif
}
1006
#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
/**
 * @brief Returns true if the thread owns resources, and false otherwise.
 *
 * Resources are accounted with the Thread_Control::resource_count resource
 * counter.  This counter is used by mutex objects for example.
 *
 * @param[in] the_thread The thread.
 *
 * @return True if the resource count is non-zero, false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
  const Thread_Control *the_thread
)
{
  return the_thread->resource_count != 0;
}
#endif
1023
#if defined(RTEMS_SMP)
/**
 * @brief Removes the scheduler help node of the thread from its chain, if it
 * is currently on one.
 *
 * The help node is protected by the per-processor lock of the specified
 * processor.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  _Per_CPU_Acquire( cpu );

  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
  }

  _Per_CPU_Release( cpu );
}
#endif
1040
/**
 * @brief Returns the home scheduler of the thread.
 *
 * On uni-processor configurations this is the sole scheduler instance.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.home;
#else
  (void) the_thread;
  return &_Scheduler_Table[ 0 ];
#endif
}
1052
/**
 * @brief Returns the scheduler node of the home scheduler of the thread.
 *
 * On SMP this is the first node on the thread's wait node chain, which must
 * not be empty.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
    _Chain_First( &the_thread->Scheduler.Wait_nodes )
  );
#else
  return the_thread->Scheduler.nodes;
#endif
}
1066
/**
 * @brief Returns the scheduler node of the thread with the specified index.
 *
 * On SMP configurations, the scheduler nodes of a thread are stored in a
 * contiguous array with an element size of _Scheduler_Node_size bytes.  On
 * uniprocessor configurations, only index zero is valid.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_index The index of the desired scheduler node.
 *
 * @return The scheduler node with the specified index.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
  const Thread_Control *the_thread,
  size_t                scheduler_index
)
{
#if defined(RTEMS_SMP)
  return (Scheduler_Node *)
    ( (uintptr_t) the_thread->Scheduler.nodes
      + scheduler_index * _Scheduler_Node_size );
#else
  _Assert( scheduler_index == 0 );
  (void) scheduler_index;
  return the_thread->Scheduler.nodes;
#endif
}
1082
1083#if defined(RTEMS_SMP)
/**
 * @brief Acquires the thread scheduler lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 *   release.
 *
 * @see _Thread_Scheduler_release_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
}
1091
/**
 * @brief Releases the thread scheduler lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 *   acquire.
 *
 * @see _Thread_Scheduler_acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
}
1099
1100#if defined(RTEMS_SMP)
1101void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1102#endif
1103
/**
 * @brief Registers an add or remove request for the scheduler node of the
 * thread.
 *
 * The requests are processed later by _Thread_Scheduler_process_requests().
 * An add request followed by a remove request (or vice versa) cancels out to
 * a nothing request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node of the request.
 * @param[in] request The request, SCHEDULER_NODE_REQUEST_ADD or
 *   SCHEDULER_NODE_REQUEST_REMOVE.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
  Thread_Control         *the_thread,
  Scheduler_Node         *scheduler_node,
  Scheduler_Node_request  request
)
{
  ISR_lock_Context       lock_context;
  Scheduler_Node_request current_request;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  current_request = scheduler_node->Thread.request;

  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
    _Assert(
      request == SCHEDULER_NODE_REQUEST_ADD
        || request == SCHEDULER_NODE_REQUEST_REMOVE
    );
    _Assert( scheduler_node->Thread.next_request == NULL );
    /* Prepend the node to the pending request list of the thread */
    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
    the_thread->Scheduler.requests = scheduler_node;
  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
    _Assert(
      ( current_request == SCHEDULER_NODE_REQUEST_ADD
        && request == SCHEDULER_NODE_REQUEST_REMOVE )
      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
        && request == SCHEDULER_NODE_REQUEST_ADD )
    );
    /* A pending add cancels a remove and vice versa */
    request = SCHEDULER_NODE_REQUEST_NOTHING;
  }

  scheduler_node->Thread.request = request;

  _Thread_Scheduler_release_critical( the_thread, &lock_context );
}
1139
/**
 * @brief Appends the scheduler node to the thread wait nodes and registers a
 * corresponding add request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to add.
 *
 * @see _Thread_Scheduler_remove_wait_node().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Append_unprotected(
    &the_thread->Scheduler.Wait_nodes,
    &scheduler_node->Thread.Wait_node
  );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_ADD
  );
}
1155
/**
 * @brief Extracts the scheduler node from the thread wait nodes and registers
 * a corresponding remove request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to remove.
 *
 * @see _Thread_Scheduler_add_wait_node().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_REMOVE
  );
}
1168#endif
1169
1170/**
1171 * @brief Returns the priority of the thread.
1172 *
1173 * Returns the user API and thread wait information relevant thread priority.
1174 * This includes temporary thread priority adjustments due to locking
1175 * protocols, a job release or the POSIX sporadic server for example.
1176 *
1177 * @return The priority of the thread.
1178 */
1179RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1180  const Thread_Control *the_thread
1181)
1182{
1183  Scheduler_Node *scheduler_node;
1184
1185  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1186  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1187}
1188
1189/**
1190 * @brief Acquires the thread wait default lock inside a critical section
1191 * (interrupts disabled).
1192 *
1193 * @param[in] the_thread The thread.
1194 * @param[in] lock_context The lock context used for the corresponding lock
1195 *   release.
1196 *
1197 * @see _Thread_Wait_release_default_critical().
1198 */
1199RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
1200  Thread_Control   *the_thread,
1201  ISR_lock_Context *lock_context
1202)
1203{
1204  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
1205}
1206
1207/**
1208 * @brief Acquires the thread wait default lock and returns the executing
1209 * thread.
1210 *
1211 * @param[in] lock_context The lock context used for the corresponding lock
1212 *   release.
1213 *
1214 * @return The executing thread.
1215 *
1216 * @see _Thread_Wait_release_default().
1217 */
1218RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
1219  ISR_lock_Context *lock_context
1220)
1221{
1222  Thread_Control *executing;
1223
1224  _ISR_lock_ISR_disable( lock_context );
1225  executing = _Thread_Executing;
1226  _Thread_Wait_acquire_default_critical( executing, lock_context );
1227
1228  return executing;
1229}
1230
1231/**
1232 * @brief Acquires the thread wait default lock and disables interrupts.
1233 *
1234 * @param[in] the_thread The thread.
1235 * @param[in] lock_context The lock context used for the corresponding lock
1236 *   release.
1237 *
1238 * @see _Thread_Wait_release_default().
1239 */
1240RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
1241  Thread_Control   *the_thread,
1242  ISR_lock_Context *lock_context
1243)
1244{
1245  _ISR_lock_ISR_disable( lock_context );
1246  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
1247}
1248
1249/**
1250 * @brief Releases the thread wait default lock inside a critical section
1251 * (interrupts disabled).
1252 *
1253 * The previous interrupt status is not restored.
1254 *
1255 * @param[in] the_thread The thread.
1256 * @param[in] lock_context The lock context used for the corresponding lock
1257 *   acquire.
1258 */
1259RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
1260  Thread_Control   *the_thread,
1261  ISR_lock_Context *lock_context
1262)
1263{
1264  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1265}
1266
1267/**
1268 * @brief Releases the thread wait default lock and restores the previous
1269 * interrupt status.
1270 *
1271 * @param[in] the_thread The thread.
1272 * @param[in] lock_context The lock context used for the corresponding lock
1273 *   acquire.
1274 */
1275RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
1276  Thread_Control   *the_thread,
1277  ISR_lock_Context *lock_context
1278)
1279{
1280  _Thread_Wait_release_default_critical( the_thread, lock_context );
1281  _ISR_lock_ISR_enable( lock_context );
1282}
1283
1284#if defined(RTEMS_SMP)
/**
 * @brief Maps a pending request chain node back to its enclosing thread
 * queue context.
 */
#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1287
/**
 * @brief Removes the gate of the queue lock context from the pending request
 * chain of the thread and opens the gate of the next pending request, if any.
 *
 * The caller must hold the thread wait default lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_lock_context The queue lock context of the request to
 *   remove.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
  Thread_Control            *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
  Chain_Node *first;

  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );

  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
    /* Let the next pending request proceed */
    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
  }
}
1302
/**
 * @brief Acquires the thread queue lock inside a critical section and
 * accounts the acquire to the potpourri lock statistics of the executing
 * thread.
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The queue lock context used for the
 *   corresponding release.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_acquire_critical(
    queue,
    &_Thread_Executing->Potpourri_stats,
    &queue_lock_context->Lock_context
  );
}
1314
/**
 * @brief Releases the thread queue lock inside a critical section.
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The queue lock context used for the
 *   corresponding acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_release_critical(
    queue,
    &queue_lock_context->Lock_context
  );
}
1325#endif
1326
1327/**
1328 * @brief Acquires the thread wait lock inside a critical section (interrupts
1329 * disabled).
1330 *
1331 * @param[in] the_thread The thread.
1332 * @param[in] queue_context The thread queue context for the corresponding
1333 *   _Thread_Wait_release_critical().
1334 */
1335RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
1336  Thread_Control       *the_thread,
1337  Thread_queue_Context *queue_context
1338)
1339{
1340#if defined(RTEMS_SMP)
1341  Thread_queue_Queue *queue;
1342
1343  _Thread_Wait_acquire_default_critical(
1344    the_thread,
1345    &queue_context->Lock_context.Lock_context
1346  );
1347
1348  queue = the_thread->Wait.queue;
1349  queue_context->Lock_context.Wait.queue = queue;
1350
1351  if ( queue != NULL ) {
1352    _Thread_queue_Gate_add(
1353      &the_thread->Wait.Lock.Pending_requests,
1354      &queue_context->Lock_context.Wait.Gate
1355    );
1356    _Thread_Wait_release_default_critical(
1357      the_thread,
1358      &queue_context->Lock_context.Lock_context
1359    );
1360    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1361
1362    if ( queue_context->Lock_context.Wait.queue == NULL ) {
1363      _Thread_Wait_release_queue_critical(
1364        queue,
1365        &queue_context->Lock_context
1366      );
1367      _Thread_Wait_acquire_default_critical(
1368        the_thread,
1369        &queue_context->Lock_context.Lock_context
1370      );
1371      _Thread_Wait_remove_request_locked(
1372        the_thread,
1373        &queue_context->Lock_context
1374      );
1375      _Assert( the_thread->Wait.queue == NULL );
1376    }
1377  }
1378#else
1379  (void) the_thread;
1380  (void) queue_context;
1381#endif
1382}
1383
1384/**
1385 * @brief Acquires the thread wait default lock and disables interrupts.
1386 *
1387 * @param[in] the_thread The thread.
1388 * @param[in] queue_context The thread queue context for the corresponding
1389 *   _Thread_Wait_release().
1390 */
1391RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
1392  Thread_Control       *the_thread,
1393  Thread_queue_Context *queue_context
1394)
1395{
1396  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
1397  _Thread_Wait_acquire_critical( the_thread, queue_context );
1398}
1399
1400/**
1401 * @brief Releases the thread wait lock inside a critical section (interrupts
1402 * disabled).
1403 *
1404 * The previous interrupt status is not restored.
1405 *
1406 * @param[in] the_thread The thread.
1407 * @param[in] queue_context The thread queue context used for corresponding
1408 *   _Thread_Wait_acquire_critical().
1409 */
1410RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
1411  Thread_Control       *the_thread,
1412  Thread_queue_Context *queue_context
1413)
1414{
1415#if defined(RTEMS_SMP)
1416  Thread_queue_Queue *queue;
1417
1418  queue = queue_context->Lock_context.Wait.queue;
1419
1420  if ( queue != NULL ) {
1421    _Thread_Wait_release_queue_critical(
1422      queue, &queue_context->Lock_context
1423    );
1424    _Thread_Wait_acquire_default_critical(
1425      the_thread,
1426      &queue_context->Lock_context.Lock_context
1427    );
1428    _Thread_Wait_remove_request_locked(
1429      the_thread,
1430      &queue_context->Lock_context
1431    );
1432  }
1433
1434  _Thread_Wait_release_default_critical(
1435    the_thread,
1436    &queue_context->Lock_context.Lock_context
1437  );
1438#else
1439  (void) the_thread;
1440  (void) queue_context;
1441#endif
1442}
1443
1444/**
1445 * @brief Releases the thread wait lock and restores the previous interrupt
1446 * status.
1447 *
1448 * @param[in] the_thread The thread.
1449 * @param[in] queue_context The thread queue context used for corresponding
1450 *   _Thread_Wait_acquire().
1451 */
1452RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
1453  Thread_Control       *the_thread,
1454  Thread_queue_Context *queue_context
1455)
1456{
1457  _Thread_Wait_release_critical( the_thread, queue_context );
1458  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
1459}
1460
1461/**
1462 * @brief Claims the thread wait queue.
1463 *
1464 * The caller must not be the owner of the default thread wait lock.  The
1465 * caller must be the owner of the corresponding thread queue lock.  The
1466 * registration of the corresponding thread queue operations is deferred and
1467 * done after the deadlock detection.  This is crucial to support timeouts on
1468 * SMP configurations.
1469 *
1470 * @param[in] the_thread The thread.
1471 * @param[in] queue The new thread queue.
1472 *
1473 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
1474 */
1475RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
1476  Thread_Control     *the_thread,
1477  Thread_queue_Queue *queue
1478)
1479{
1480  ISR_lock_Context lock_context;
1481
1482  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1483
1484  _Assert( the_thread->Wait.queue == NULL );
1485
1486#if defined(RTEMS_SMP)
1487  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
1488  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
1489  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
1490#endif
1491
1492  the_thread->Wait.queue = queue;
1493
1494  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1495}
1496
1497/**
1498 * @brief Finalizes the thread wait queue claim via registration of the
1499 * corresponding thread queue operations.
1500 *
1501 * @param[in] the_thread The thread.
1502 * @param[in] operations The corresponding thread queue operations.
1503 */
1504RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
1505  Thread_Control                *the_thread,
1506  const Thread_queue_Operations *operations
1507)
1508{
1509  the_thread->Wait.operations = operations;
1510}
1511
1512/**
1513 * @brief Removes a thread wait lock request.
1514 *
1515 * On SMP configurations, removes a thread wait lock request.
1516 *
1517 * On other configurations, this function does nothing.
1518 *
1519 * @param[in] the_thread The thread.
1520 * @param[in] queue_lock_context The thread queue lock context used for
1521 *   corresponding _Thread_Wait_acquire().
1522 */
1523RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
1524  Thread_Control            *the_thread,
1525  Thread_queue_Lock_context *queue_lock_context
1526)
1527{
1528#if defined(RTEMS_SMP)
1529  ISR_lock_Context lock_context;
1530
1531  _Thread_Wait_acquire_default( the_thread, &lock_context );
1532  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
1533  _Thread_Wait_release_default( the_thread, &lock_context );
1534#else
1535  (void) the_thread;
1536  (void) queue_lock_context;
1537#endif
1538}
1539
1540/**
1541 * @brief Restores the default thread wait queue and operations.
1542 *
1543 * The caller must be the owner of the current thread wait queue lock.
1544 *
1545 * On SMP configurations, the pending requests are updated to use the stale
1546 * thread queue operations.
1547 *
1548 * @param[in] the_thread The thread.
1549 *
1550 * @see _Thread_Wait_claim().
1551 */
1552RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
1553  Thread_Control *the_thread
1554)
1555{
1556#if defined(RTEMS_SMP)
1557  ISR_lock_Context  lock_context;
1558  Chain_Node       *node;
1559  const Chain_Node *tail;
1560
1561  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1562
1563  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1564  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
1565
1566  if ( node != tail ) {
1567    do {
1568      Thread_queue_Context *queue_context;
1569
1570      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
1571      queue_context->Lock_context.Wait.queue = NULL;
1572
1573      node = _Chain_Next( node );
1574    } while ( node != tail );
1575
1576    _Thread_queue_Gate_add(
1577      &the_thread->Wait.Lock.Pending_requests,
1578      &the_thread->Wait.Lock.Tranquilizer
1579    );
1580  } else {
1581    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
1582  }
1583#endif
1584
1585  the_thread->Wait.queue = NULL;
1586  the_thread->Wait.operations = &_Thread_queue_Operations_default;
1587
1588#if defined(RTEMS_SMP)
1589  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1590#endif
1591}
1592
1593/**
1594 * @brief Tranquilizes the thread after a wait on a thread queue.
1595 *
1596 * After the violent blocking procedure this function makes the thread calm and
1597 * peaceful again so that it can carry out its normal work.
1598 *
1599 * On SMP configurations, ensures that all pending thread wait lock requests
1600 * completed before the thread is able to begin a new thread wait procedure.
1601 *
1602 * On other configurations, this function does nothing.
1603 *
1604 * It must be called after a _Thread_Wait_claim() exactly once
1605 *  - after the corresponding thread queue lock was released, and
1606 *  - the default wait state is restored or some other processor is about to do
1607 *    this.
1608 *
1609 * @param[in] the_thread The thread.
1610 */
1611RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
1612  Thread_Control *the_thread
1613)
1614{
1615#if defined(RTEMS_SMP)
1616  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
1617#else
1618  (void) the_thread;
1619#endif
1620}
1621
1622/**
1623 * @brief Cancels a thread wait on a thread queue.
1624 *
1625 * @param[in] the_thread The thread.
1626 * @param[in] queue_context The thread queue context used for corresponding
1627 *   _Thread_Wait_acquire().
1628 */
1629RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
1630  Thread_Control       *the_thread,
1631  Thread_queue_Context *queue_context
1632)
1633{
1634  Thread_queue_Queue *queue;
1635
1636  queue = the_thread->Wait.queue;
1637
1638#if defined(RTEMS_SMP)
1639  if ( queue != NULL ) {
1640    _Assert( queue_context->Lock_context.Wait.queue == queue );
1641#endif
1642
1643    ( *the_thread->Wait.operations->extract )(
1644      queue,
1645      the_thread,
1646      queue_context
1647    );
1648    _Thread_Wait_restore_default( the_thread );
1649
1650#if defined(RTEMS_SMP)
1651    _Assert( queue_context->Lock_context.Wait.queue == NULL );
1652    queue_context->Lock_context.Wait.queue = queue;
1653  }
1654#endif
1655}
1656
1657/**
1658 * @brief The initial thread wait flags value set by _Thread_Initialize().
1659 */
1660#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1661
1662/**
1663 * @brief Mask to get the thread wait state flags.
1664 */
1665#define THREAD_WAIT_STATE_MASK 0xffU
1666
1667/**
1668 * @brief Indicates that the thread begins with the blocking operation.
1669 *
1670 * A blocking operation consists of an optional watchdog initialization and the
1671 * setting of the appropriate thread blocking state with the corresponding
1672 * scheduler block operation.
1673 */
1674#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1675
1676/**
1677 * @brief Indicates that the thread completed the blocking operation.
1678 */
1679#define THREAD_WAIT_STATE_BLOCKED 0x2U
1680
1681/**
1682 * @brief Indicates that a condition to end the thread wait occurred.
1683 *
1684 * This could be a timeout, a signal, an event or a resource availability.
1685 */
1686#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1687
1688/**
1689 * @brief Mask to get the thread wait class flags.
1690 */
1691#define THREAD_WAIT_CLASS_MASK 0xff00U
1692
1693/**
1694 * @brief Indicates that the thread waits for an event.
1695 */
1696#define THREAD_WAIT_CLASS_EVENT 0x100U
1697
1698/**
1699 * @brief Indicates that the thread waits for a system event.
1700 */
1701#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1702
1703/**
1704 * @brief Indicates that the thread waits for an object.
1705 */
1706#define THREAD_WAIT_CLASS_OBJECT 0x400U
1707
1708/**
1709 * @brief Indicates that the thread waits for a period.
1710 */
1711#define THREAD_WAIT_CLASS_PERIOD 0x800U
1712
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations, the store uses relaxed atomic order.
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The thread wait flags to set.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
1724
/**
 * @brief Returns the thread wait flags.
 *
 * On SMP configurations, the load uses relaxed atomic order.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
1735
/**
 * @brief Returns the thread wait flags with acquire semantics.
 *
 * On SMP configurations, the load uses acquire atomic order.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
#else
  return the_thread->Wait.flags;
#endif
}
1746
1747/**
1748 * @brief Tries to change the thread wait flags with release semantics in case
1749 * of success.
1750 *
1751 * Must be called inside a critical section (interrupts disabled).
1752 *
1753 * In case the wait flags are equal to the expected wait flags, then the wait
1754 * flags are set to the desired wait flags.
1755 *
1756 * @param[in] the_thread The thread.
1757 * @param[in] expected_flags The expected wait flags.
1758 * @param[in] desired_flags The desired wait flags.
1759 *
1760 * @retval true The wait flags were equal to the expected wait flags.
1761 * @retval false Otherwise.
1762 */
1763RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
1764  Thread_Control    *the_thread,
1765  Thread_Wait_flags  expected_flags,
1766  Thread_Wait_flags  desired_flags
1767)
1768{
1769  _Assert( _ISR_Get_level() != 0 );
1770
1771#if defined(RTEMS_SMP)
1772  return _Atomic_Compare_exchange_uint(
1773    &the_thread->Wait.flags,
1774    &expected_flags,
1775    desired_flags,
1776    ATOMIC_ORDER_RELEASE,
1777    ATOMIC_ORDER_RELAXED
1778  );
1779#else
1780  bool success = ( the_thread->Wait.flags == expected_flags );
1781
1782  if ( success ) {
1783    the_thread->Wait.flags = desired_flags;
1784  }
1785
1786  return success;
1787#endif
1788}
1789
1790/**
1791 * @brief Tries to change the thread wait flags with acquire semantics.
1792 *
1793 * In case the wait flags are equal to the expected wait flags, then the wait
1794 * flags are set to the desired wait flags.
1795 *
1796 * @param[in] the_thread The thread.
1797 * @param[in] expected_flags The expected wait flags.
1798 * @param[in] desired_flags The desired wait flags.
1799 *
1800 * @retval true The wait flags were equal to the expected wait flags.
1801 * @retval false Otherwise.
1802 */
1803RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
1804  Thread_Control    *the_thread,
1805  Thread_Wait_flags  expected_flags,
1806  Thread_Wait_flags  desired_flags
1807)
1808{
1809  bool success;
1810#if defined(RTEMS_SMP)
1811  return _Atomic_Compare_exchange_uint(
1812    &the_thread->Wait.flags,
1813    &expected_flags,
1814    desired_flags,
1815    ATOMIC_ORDER_ACQUIRE,
1816    ATOMIC_ORDER_ACQUIRE
1817  );
1818#else
1819  ISR_Level level;
1820
1821  _ISR_Local_disable( level );
1822
1823  success = _Thread_Wait_flags_try_change_release(
1824    the_thread,
1825    expected_flags,
1826    desired_flags
1827  );
1828
1829  _ISR_Local_enable( level );
1830#endif
1831
1832  return success;
1833}
1834
1835/**
1836 * @brief Returns the object identifier of the object containing the current
1837 * thread wait queue.
1838 *
1839 * This function may be used for debug and system information purposes.  The
1840 * caller must be the owner of the thread lock.
1841 *
1842 * @retval 0 The thread waits on no thread queue currently, the thread wait
1843 *   queue is not contained in an object, or the current thread state provides
1844 *   insufficient information, e.g. the thread is in the middle of a blocking
1845 *   operation.
1846 * @retval other The object identifier of the object containing the thread wait
1847 *   queue.
1848 */
1849Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1850
/**
 * @brief Returns the thread wait return code as a status code.
 *
 * @param[in] the_thread The thread.
 *
 * @return The Thread_Wait_information::return_code cast to a Status_Control.
 */
RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
  const Thread_Control *the_thread
)
{
  return (Status_Control) the_thread->Wait.return_code;
}
1857
1858/**
1859 * @brief General purpose thread wait timeout.
1860 *
1861 * @param[in] watchdog The thread timer watchdog.
1862 */
1863void _Thread_Timeout( Watchdog_Control *watchdog );
1864
/**
 * @brief Initializes the thread timer information.
 *
 * The watchdog header defaults to the relative (ticks based) watchdog header
 * of the specified processor.
 *
 * @param[in] timer The thread timer information to initialize.
 * @param[in] cpu The processor used for the watchdog pre-initialization.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
1874
/**
 * @brief Inserts the thread timer watchdog into the relative (ticks based)
 * watchdog header of the specified processor.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine, e.g. _Thread_Timeout().
 * @param[in] ticks The watchdog interval in ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  Watchdog_Interval               ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1893
/**
 * @brief Inserts the thread timer watchdog into the absolute watchdog header
 * of the specified processor.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine, e.g. _Thread_Timeout().
 * @param[in] expire The absolute watchdog expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1912
/**
 * @brief Removes the thread timer watchdog from its watchdog header.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    /* The watchdog may have been inserted on another processor */
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1931
/**
 * @brief Tranquilizes the thread, removes its timer and unblocks it.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue The thread queue, used on multiprocessing configurations
 *   to unblock a proxy thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    /* Remote thread, unblock via its proxy */
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
1951
/**
 * @brief Sets the name of the thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] name The new thread name.
 *
 * @return The status of the operation.
 */
Status_Control _Thread_Set_name(
  Thread_Control *the_thread,
  const char     *name
);

/**
 * @brief Gets the name of the thread.
 *
 * @param[in] the_thread The thread.
 * @param[out] buffer The buffer for the thread name.
 * @param[in] buffer_size The size of the buffer in characters.
 *
 * @return NOTE(review): presumably the length of the thread name independent
 *   of the buffer size (snprintf() style) — confirm against the
 *   implementation.
 */
size_t _Thread_Get_name(
  const Thread_Control *the_thread,
  char                 *buffer,
  size_t                buffer_size
);
1962
1963/** @}*/
1964
1965#ifdef __cplusplus
1966}
1967#endif
1968
1969#if defined(RTEMS_MULTIPROCESSING)
1970#include <rtems/score/threadmp.h>
1971#endif
1972
1973#endif
1974/* end of include file */
Note: See TracBrowser for help on using the repository browser.