source: rtems/cpukit/include/rtems/score/threadimpl.h @ 21275b58

Last change on this file since 21275b58 was 21275b58, checked in by Sebastian Huber <sebastian.huber@…>, on Nov 22, 2018 at 6:14:51 PM

score: Static Objects_Information initialization

Statically allocate the objects information together with the initial
set of objects via <rtems/confdefs.h>. Provide default object
information with zero objects via librtemscpu.a. This greatly
simplifies the workspace size estimate. RTEMS applications which do not
use the unlimited objects option are easier to debug since all objects
now reside in statically allocated objects of the right types.

Close #3621.

  • Property mode set to 100644
File size: 51.9 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2017 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/timestampimpl.h>
35#include <rtems/score/threadqimpl.h>
36#include <rtems/score/todimpl.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44/**
45 * @addtogroup ScoreThread
46 */
47/**@{**/
48
/**
 *  Special status code which indicates that a proxy is blocking
 *  (see _Thread_Is_proxy_blocking()).
 */
#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
54
55/**
56 *  Self for the GNU Ada Run-Time
57 */
58extern void *rtems_ada_self;
59
60/**
61 * @brief Object identifier of the global constructor thread.
62 *
63 * This variable is set by _RTEMS_tasks_Initialize_user_tasks_body() or
64 * _POSIX_Threads_Initialize_user_threads_body().
65 *
66 * It is consumed by _Thread_Handler().
67 */
68extern Objects_Id _Thread_Global_constructor;
69
70/**
71 *  The following points to the thread whose floating point
72 *  context is currently loaded.
73 */
74#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
75extern Thread_Control *_Thread_Allocated_fp;
76#endif
77
78#if defined(RTEMS_SMP)
79#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
80  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
81#endif
82
83typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
84
85void _Thread_Iterate(
86  Thread_Visitor  visitor,
87  void           *arg
88);
89
90void _Thread_Initialize_information( Thread_Information *information );
91
92/**
93 *  @brief Initialize thread handler.
94 *
95 *  This routine performs the initialization necessary for this handler.
96 */
97void _Thread_Handler_initialization(void);
98
99/**
100 *  @brief Create idle thread.
101 *
102 *  This routine creates the idle thread.
103 *
104 *  @warning No thread should be created before this one.
105 */
106void _Thread_Create_idle(void);
107
108/**
109 *  @brief Start thread multitasking.
110 *
111 *  This routine initiates multitasking.  It is invoked only as
112 *  part of initialization and its invocation is the last act of
113 *  the non-multitasking part of the system initialization.
114 */
115void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
116
/**
 *  @brief Allocate the requested stack space for the thread.
 *
 *  Allocate the requested stack space for the thread.
 *  Set the Start.stack field to the address of the stack.
 *
 *  @param[in] the_thread is the thread where the stack space is requested
 *  @param[in] stack_size is the size of the requested stack space in bytes
 *
 *  @retval actual size allocated after any adjustment
 *  @retval zero if the allocation failed
 */
size_t _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  size_t          stack_size
);
133
134/**
135 *  @brief Deallocate thread stack.
136 *
137 *  Deallocate the Thread's stack.
138 */
139void _Thread_Stack_Free(
140  Thread_Control *the_thread
141);
142
143/**
144 *  @brief Initialize thread.
145 *
146 *  This routine initializes the specified the thread.  It allocates
147 *  all memory associated with this thread.  It completes by adding
148 *  the thread to the local object table so operations on this
149 *  thread id are allowed.
150 *
151 *  @note If stack_area is NULL, it is allocated from the workspace.
152 *
153 *  @note If the stack is allocated from the workspace, then it is
154 *        guaranteed to be of at least minimum size.
155 */
156bool _Thread_Initialize(
157  Thread_Information                   *information,
158  Thread_Control                       *the_thread,
159  const struct _Scheduler_Control      *scheduler,
160  void                                 *stack_area,
161  size_t                                stack_size,
162  bool                                  is_fp,
163  Priority_Control                      priority,
164  bool                                  is_preemptible,
165  Thread_CPU_budget_algorithms          budget_algorithm,
166  Thread_CPU_budget_algorithm_callout   budget_callout,
167  uint32_t                              isr_level,
168  Objects_Name                          name
169);
170
/**
 *  @brief Initializes thread and executes it.
 *
 *  This routine initializes the executable information for a thread
 *  and makes it ready to execute.  After this routine executes, the
 *  thread competes with all other threads for CPU time.
 *
 *  @param the_thread The thread to be started.
 *  @param entry The thread entry information.
 *  @param lock_context The lock context — NOTE(review): presumably of a
 *    corresponding _Thread_State_acquire(); confirm with the implementation.
 *
 *  @return true if the thread was started, false otherwise — presumably the
 *    thread must be dormant; confirm with the implementation.
 */
bool _Thread_Start(
  Thread_Control                 *the_thread,
  const Thread_Entry_information *entry,
  ISR_lock_Context               *lock_context
);
186
/**
 * @brief Restarts the executing thread with new entry information.
 *
 * This function does not return to the caller.
 *
 * @param executing The executing thread.
 * @param entry The new thread entry information.
 * @param lock_context The lock context — NOTE(review): presumably of a
 *   corresponding thread state acquire; confirm with the implementation.
 */
void _Thread_Restart_self(
  Thread_Control                 *executing,
  const Thread_Entry_information *entry,
  ISR_lock_Context               *lock_context
) RTEMS_NO_RETURN;

/**
 * @brief Restarts a thread other than the executing thread with new entry
 * information.
 *
 * @param the_thread The thread to restart.
 * @param entry The new thread entry information.
 * @param lock_context The lock context — NOTE(review): presumably of a
 *   corresponding thread state acquire; confirm with the implementation.
 *
 * @return true on success, false otherwise — confirm the exact failure
 *   conditions with the implementation.
 */
bool _Thread_Restart_other(
  Thread_Control                 *the_thread,
  const Thread_Entry_information *entry,
  ISR_lock_Context               *lock_context
);

/**
 * @brief Yields the processor of the executing thread to other ready threads.
 *
 * @param executing The executing thread.
 */
void _Thread_Yield( Thread_Control *executing );

/**
 * @brief Changes the thread life state.
 *
 * @param clear The set of life states to clear.
 * @param set The set of life states to set.
 * @param ignore The set of life states to ignore.
 *
 * @return The previous life state — presumably of the executing thread;
 *   confirm with the implementation.
 */
Thread_Life_state _Thread_Change_life(
  Thread_Life_state clear,
  Thread_Life_state set,
  Thread_Life_state ignore
);

/**
 * @brief Sets the thread life protection state.
 *
 * @param state The new life protection state.
 *
 * @return The previous life state.
 */
Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
208
209/**
210 * @brief Kills all zombie threads in the system.
211 *
212 * Threads change into the zombie state as the last step in the thread
213 * termination sequence right before a context switch to the heir thread is
214 * initiated.  Since the thread stack is still in use during this phase we have
215 * to postpone the thread stack reclamation until this point.  On SMP
216 * configurations we may have to busy wait for context switch completion here.
217 */
218void _Thread_Kill_zombies( void );
219
220void _Thread_Exit(
221  Thread_Control    *executing,
222  Thread_Life_state  set,
223  void              *exit_value
224);
225
226void _Thread_Join(
227  Thread_Control       *the_thread,
228  States_Control        waiting_for_join,
229  Thread_Control       *executing,
230  Thread_queue_Context *queue_context
231);
232
233void _Thread_Cancel(
234  Thread_Control *the_thread,
235  Thread_Control *executing,
236  void           *exit_value
237);
238
239typedef struct {
240  Thread_queue_Context  Base;
241  Thread_Control       *cancel;
242} Thread_Close_context;
243
244/**
245 * @brief Closes the thread.
246 *
247 * Closes the thread object and starts the thread termination sequence.  In
248 * case the executing thread is not terminated, then this function waits until
249 * the terminating thread reached the zombie state.
250 */
251void _Thread_Close(
252  Thread_Control       *the_thread,
253  Thread_Control       *executing,
254  Thread_Close_context *context
255);
256
257RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
258{
259  return _States_Is_ready( the_thread->current_state );
260}
261
262States_Control _Thread_Clear_state_locked(
263  Thread_Control *the_thread,
264  States_Control  state
265);
266
267/**
268 * @brief Clears the specified thread state.
269 *
270 * In case the previous state is a non-ready state and the next state is the
271 * ready state, then the thread is unblocked by the scheduler.
272 *
273 * @param[in] the_thread The thread.
274 * @param[in] state The state to clear.  It must not be zero.
275 *
276 * @return The previous state.
277 */
278States_Control _Thread_Clear_state(
279  Thread_Control *the_thread,
280  States_Control  state
281);
282
283States_Control _Thread_Set_state_locked(
284  Thread_Control *the_thread,
285  States_Control  state
286);
287
288/**
289 * @brief Sets the specified thread state.
290 *
291 * In case the previous state is the ready state, then the thread is blocked by
292 * the scheduler.
293 *
294 * @param[in] the_thread The thread.
295 * @param[in] state The state to set.  It must not be zero.
296 *
297 * @return The previous state.
298 */
299States_Control _Thread_Set_state(
300  Thread_Control *the_thread,
301  States_Control  state
302);
303
/**
 *  @brief Initializes environment for a thread.
 *
 *  This routine initializes the context of @a the_thread to its
 *  appropriate starting state.
 *
 *  @param[in] the_thread is the pointer to the thread control block.
 */
void _Thread_Load_environment(
  Thread_Control *the_thread
);
315
316void _Thread_Entry_adaptor_idle( Thread_Control *executing );
317
318void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
319
320void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
321
/**
 *  @brief Wrapper function for all threads.
 *
 *  This routine is the wrapper function for all threads.  It is
 *  the starting point for all threads.  The user provided thread
 *  entry point is invoked by this routine.  Operations
 *  which must be performed immediately before and after the user's
 *  thread executes are found here.
 *
 *  @note On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 */
void _Thread_Handler( void );
338
339RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
340  Thread_Control   *the_thread,
341  ISR_lock_Context *lock_context
342)
343{
344  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
345}
346
347RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
348  Thread_Control   *the_thread,
349  ISR_lock_Context *lock_context
350)
351{
352  _ISR_lock_ISR_disable( lock_context );
353  _Thread_State_acquire_critical( the_thread, lock_context );
354}
355
356RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
357  ISR_lock_Context *lock_context
358)
359{
360  Thread_Control *executing;
361
362  _ISR_lock_ISR_disable( lock_context );
363  executing = _Thread_Executing;
364  _Thread_State_acquire_critical( executing, lock_context );
365
366  return executing;
367}
368
369RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
370  Thread_Control   *the_thread,
371  ISR_lock_Context *lock_context
372)
373{
374  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
375}
376
377RTEMS_INLINE_ROUTINE void _Thread_State_release(
378  Thread_Control   *the_thread,
379  ISR_lock_Context *lock_context
380)
381{
382  _Thread_State_release_critical( the_thread, lock_context );
383  _ISR_lock_ISR_enable( lock_context );
384}
385
386#if defined(RTEMS_DEBUG)
387RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
388  const Thread_Control *the_thread
389)
390{
391  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
392}
393#endif
394
395/**
396 * @brief Performs the priority actions specified by the thread queue context
397 * along the thread queue path.
398 *
399 * The caller must be the owner of the thread wait lock.
400 *
401 * @param start_of_path The start thread of the thread queue path.
402 * @param queue_context The thread queue context specifying the thread queue
403 *   path and initial thread priority actions.
404 *
405 * @see _Thread_queue_Path_acquire_critical().
406 */
407void _Thread_Priority_perform_actions(
408  Thread_Control       *start_of_path,
409  Thread_queue_Context *queue_context
410);
411
412/**
413 * @brief Adds the specified thread priority node to the corresponding thread
414 * priority aggregation.
415 *
416 * The caller must be the owner of the thread wait lock.
417 *
418 * @param the_thread The thread.
419 * @param priority_node The thread priority node to add.
420 * @param queue_context The thread queue context to return an updated set of
421 *   threads for _Thread_Priority_update().  The thread queue context must be
422 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
423 *   call of this function.
424 *
425 * @see _Thread_Wait_acquire().
426 */
427void _Thread_Priority_add(
428  Thread_Control       *the_thread,
429  Priority_Node        *priority_node,
430  Thread_queue_Context *queue_context
431);
432
433/**
434 * @brief Removes the specified thread priority node from the corresponding
435 * thread priority aggregation.
436 *
437 * The caller must be the owner of the thread wait lock.
438 *
439 * @param the_thread The thread.
440 * @param priority_node The thread priority node to remove.
441 * @param queue_context The thread queue context to return an updated set of
442 *   threads for _Thread_Priority_update().  The thread queue context must be
443 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
444 *   call of this function.
445 *
446 * @see _Thread_Wait_acquire().
447 */
448void _Thread_Priority_remove(
449  Thread_Control       *the_thread,
450  Priority_Node        *priority_node,
451  Thread_queue_Context *queue_context
452);
453
454/**
455 * @brief Propagates a thread priority value change in the specified thread
456 * priority node to the corresponding thread priority aggregation.
457 *
458 * The caller must be the owner of the thread wait lock.
459 *
460 * @param the_thread The thread.
461 * @param priority_node The thread priority node to change.
462 * @param prepend_it In case this is true, then the thread is prepended to
463 *   its priority group in its home scheduler instance, otherwise it is
464 *   appended.
465 * @param queue_context The thread queue context to return an updated set of
466 *   threads for _Thread_Priority_update().  The thread queue context must be
467 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
468 *   call of this function.
469 *
470 * @see _Thread_Wait_acquire().
471 */
472void _Thread_Priority_changed(
473  Thread_Control       *the_thread,
474  Priority_Node        *priority_node,
475  bool                  prepend_it,
476  Thread_queue_Context *queue_context
477);
478
479/**
480 * @brief Changes the thread priority value of the specified thread priority
481 * node in the corresponding thread priority aggregation.
482 *
483 * The caller must be the owner of the thread wait lock.
484 *
485 * @param the_thread The thread.
486 * @param priority_node The thread priority node to change.
487 * @param new_priority The new thread priority value of the thread priority
488 *   node to change.
489 * @param prepend_it In case this is true, then the thread is prepended to
490 *   its priority group in its home scheduler instance, otherwise it is
491 *   appended.
492 * @param queue_context The thread queue context to return an updated set of
493 *   threads for _Thread_Priority_update().  The thread queue context must be
494 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
495 *   call of this function.
496 *
497 * @see _Thread_Wait_acquire().
498 */
499RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
500  Thread_Control       *the_thread,
501  Priority_Node        *priority_node,
502  Priority_Control      new_priority,
503  bool                  prepend_it,
504  Thread_queue_Context *queue_context
505)
506{
507  _Priority_Node_set_priority( priority_node, new_priority );
508  _Thread_Priority_changed(
509    the_thread,
510    priority_node,
511    prepend_it,
512    queue_context
513  );
514}
515
516/**
517 * @brief Replaces the victim priority node with the replacement priority node
518 * in the corresponding thread priority aggregation.
519 *
520 * The caller must be the owner of the thread wait lock.
521 *
522 * @param the_thread The thread.
523 * @param victim_node The victim thread priority node.
524 * @param replacement_node The replacement thread priority node.
525 *
526 * @see _Thread_Wait_acquire().
527 */
528void _Thread_Priority_replace(
529  Thread_Control *the_thread,
530  Priority_Node  *victim_node,
531  Priority_Node  *replacement_node
532);
533
/**
 * @brief Updates the priority of all threads in the set returned by previous
 * thread priority operations.
 *
 * @param queue_context The thread queue context returning an updated set of
 *   threads for the priority update.  The set is filled via
 *   _Thread_queue_Context_clear_priority_updates() and the operations
 *   referenced below.
 *
 * @see _Thread_Priority_add(), _Thread_Priority_change(),
 *   _Thread_Priority_changed() and _Thread_Priority_remove().
 */
void _Thread_Priority_update( Thread_queue_Context *queue_context );
551
552#if defined(RTEMS_SMP)
553void _Thread_Priority_and_sticky_update(
554  Thread_Control *the_thread,
555  int             sticky_level_change
556);
557#endif
558
/**
 * @brief Returns true if the left thread priority is less than the right
 * thread priority in the intuitive sense of priority and false otherwise.
 *
 * Lower priority values represent more important threads, hence the
 * numeric comparison is inverted.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
  Priority_Control left,
  Priority_Control right
)
{
  return left > right;
}
570
571/**
572 * @brief Returns the highest priority of the left and right thread priorities
573 * in the intuitive sense of priority.
574 */
575RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
576  Priority_Control left,
577  Priority_Control right
578)
579{
580  return _Thread_Priority_less_than( left, right ) ? right : left;
581}
582
/**
 * @brief Maps a thread object identifier to the objects information of the
 * corresponding API.
 *
 * @param id The thread object identifier.
 *
 * @retval NULL The API of the identifier is invalid.
 * @return The objects information of the thread class of the API.
 */
RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
  Objects_Id id
)
{
  uint32_t the_api;

  the_api = _Objects_Get_API( id );

  if ( !_Objects_Is_api_valid( the_api ) ) {
    return NULL;
  }

  /*
   * Threads are always first class :)
   *
   * There is no need to validate the object class of the object identifier,
   * since this will be done by the object get methods.
   */
  /* NOTE(review): index 1 is presumably the thread class of each API table —
     confirm against the class constants in <rtems/score/objectimpl.h>. */
  return _Objects_Information_table[ the_api ][ 1 ];
}
603
604/**
605 * @brief Gets a thread by its identifier.
606 *
607 * @see _Objects_Get().
608 */
609Thread_Control *_Thread_Get(
610  Objects_Id         id,
611  ISR_lock_Context  *lock_context
612);
613
614RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
615  const Thread_Control *thread
616)
617{
618#if defined(RTEMS_SMP)
619  return thread->Scheduler.cpu;
620#else
621  (void) thread;
622
623  return _Per_CPU_Get();
624#endif
625}
626
627RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
628  Thread_Control *thread,
629  Per_CPU_Control *cpu
630)
631{
632#if defined(RTEMS_SMP)
633  thread->Scheduler.cpu = cpu;
634#else
635  (void) thread;
636  (void) cpu;
637#endif
638}
639
640/**
641 * This function returns true if the_thread is the currently executing
642 * thread, and false otherwise.
643 */
644
645RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
646  const Thread_Control *the_thread
647)
648{
649  return ( the_thread == _Thread_Executing );
650}
651
652#if defined(RTEMS_SMP)
653/**
654 * @brief Returns @a true in case the thread executes currently on some
655 * processor in the system, otherwise @a false.
656 *
657 * Do not confuse this with _Thread_Is_executing() which checks only the
658 * current processor.
659 */
660RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
661  const Thread_Control *the_thread
662)
663{
664  return _CPU_Context_Get_is_executing( &the_thread->Registers );
665}
666#endif
667
668/**
669 * This function returns true if the_thread is the heir
670 * thread, and false otherwise.
671 */
672
673RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
674  const Thread_Control *the_thread
675)
676{
677  return ( the_thread == _Thread_Heir );
678}
679
680/**
681 * This routine clears any blocking state for the_thread.  It performs
682 * any necessary scheduling operations including the selection of
683 * a new heir thread.
684 */
685
686RTEMS_INLINE_ROUTINE void _Thread_Unblock (
687  Thread_Control *the_thread
688)
689{
690  _Thread_Clear_state( the_thread, STATES_BLOCKED );
691}
692
693/**
694 * This function returns true if the floating point context of
695 * the_thread is currently loaded in the floating point unit, and
696 * false otherwise.
697 */
698
699#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
700RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
701  const Thread_Control *the_thread
702)
703{
704  return ( the_thread == _Thread_Allocated_fp );
705}
706#endif
707
708/*
709 *  If the CPU has hardware floating point, then we must address saving
710 *  and restoring it as part of the context switch.
711 *
712 *  The second conditional compilation section selects the algorithm used
713 *  to context switch between floating point tasks.  The deferred algorithm
714 *  can be significantly better in a system with few floating point tasks
715 *  because it reduces the total number of save and restore FP context
716 *  operations.  However, this algorithm can not be used on all CPUs due
717 *  to unpredictable use of FP registers by some compilers for integer
718 *  operations.
719 */
720
721RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
722{
723#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
724#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
725  if ( executing->fp_context != NULL )
726    _Context_Save_fp( &executing->fp_context );
727#endif
728#endif
729}
730
731RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
732{
733#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
734#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
735  if ( (executing->fp_context != NULL) &&
736       !_Thread_Is_allocated_fp( executing ) ) {
737    if ( _Thread_Allocated_fp != NULL )
738      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
739    _Context_Restore_fp( &executing->fp_context );
740    _Thread_Allocated_fp = executing;
741  }
742#else
743  if ( executing->fp_context != NULL )
744    _Context_Restore_fp( &executing->fp_context );
745#endif
746#endif
747}
748
/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
760
/**
 * This function returns true if a thread dispatch is necessary, and false
 * otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
770
771/**
772 * This function returns true if the_thread is NULL and false otherwise.
773 */
774
775RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
776  const Thread_Control *the_thread
777)
778{
779  return ( the_thread == NULL );
780}
781
/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the code is the special status which
 * indicates that a proxy is blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}
793
794RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
795{
796  /* Idle threads */
797  uint32_t maximum_internal_threads =
798    rtems_configuration_get_maximum_processors();
799
800  /* MPCI thread */
801#if defined(RTEMS_MULTIPROCESSING)
802  if ( _System_state_Is_multiprocessing ) {
803    ++maximum_internal_threads;
804  }
805#endif
806
807  return maximum_internal_threads;
808}
809
810RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
811{
812  return (Thread_Control *)
813    _Objects_Allocate_unprotected( &_Thread_Information.Objects );
814}
815
816/**
817 * @brief Gets the heir of the processor and makes it executing.
818 *
819 * Must be called with interrupts disabled.  The thread dispatch necessary
820 * indicator is cleared as a side-effect.
821 *
822 * @return The heir thread.
823 *
824 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
825 * _Thread_Dispatch_update_heir().
826 */
827RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
828  Per_CPU_Control *cpu_self
829)
830{
831  Thread_Control *heir;
832
833  heir = cpu_self->heir;
834  cpu_self->dispatch_necessary = false;
835  cpu_self->executing = heir;
836
837  return heir;
838}
839
/**
 * @brief Charges the CPU time elapsed since the last per-CPU usage timestamp
 * to the thread, and resets the timestamp to the current uptime.
 *
 * @param the_thread The thread to account the elapsed CPU time to.
 * @param cpu The processor providing the usage timestamp.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  /* The old timestamp must be saved before it is overwritten by the
     current uptime, so the difference can be computed. */
  last = cpu->cpu_usage_timestamp;
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}
853
#if defined( RTEMS_SMP )
/**
 * @brief Updates the heir thread of the specified processor and requests a
 * thread dispatch on it.
 *
 * The CPU time used by the previous heir is charged via
 * _Thread_Update_CPU_time_used() before the new heir is installed.
 *
 * @param cpu_self The processor performing the update.
 * @param cpu_for_heir The processor to update the heir of.
 * @param heir The new heir thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
}
#endif
868
869void _Thread_Get_CPU_time_used(
870  Thread_Control    *the_thread,
871  Timestamp_Control *cpu_time_used
872);
873
874RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
875  Thread_Action_control *action_control
876)
877{
878  _Chain_Initialize_empty( &action_control->Chain );
879}
880
881RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
882  Thread_Action *action
883)
884{
885  _Chain_Set_off_chain( &action->Node );
886}
887
/**
 * @brief Adds a post-switch action to the thread and requests a thread
 * dispatch on the thread's processor.
 *
 * The caller must own the thread state lock (asserted).
 *
 * @param the_thread The thread to add the action to.
 * @param action The action control.
 * @param handler The action handler — NOTE(review): presumably invoked during
 *   the next thread dispatch on the thread's processor; confirm with
 *   _Thread_Dispatch().
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;

  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  /* Append only if the node is off chain, so a pending action is not
     enqueued twice. */
  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
909
910RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
911  Thread_Life_state life_state
912)
913{
914  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
915}
916
917RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
918  Thread_Life_state life_state
919)
920{
921  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
922}
923
924RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
925  Thread_Life_state life_state
926)
927{
928  return ( life_state
929    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
930}
931
932RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
933  Thread_Life_state life_state
934)
935{
936  return ( life_state
937    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
938}
939
940RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
941  const Thread_Control *the_thread
942)
943{
944  _Assert( _Thread_State_is_owner( the_thread ) );
945  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
946}
947
948RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
949  Thread_Control *the_thread
950)
951{
952#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
953  ++the_thread->resource_count;
954#else
955  (void) the_thread;
956#endif
957}
958
959RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
960  Thread_Control *the_thread
961)
962{
963#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
964  --the_thread->resource_count;
965#else
966  (void) the_thread;
967#endif
968}
969
970#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
971/**
972 * @brief Returns true if the thread owns resources, and false otherwise.
973 *
974 * Resources are accounted with the Thread_Control::resource_count resource
975 * counter.  This counter is used by mutex objects for example.
976 *
977 * @param[in] the_thread The thread.
978 */
979RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
980  const Thread_Control *the_thread
981)
982{
983  return the_thread->resource_count != 0;
984}
985#endif
986
987#if defined(RTEMS_SMP)
988RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
989  Thread_Control  *the_thread,
990  Per_CPU_Control *cpu
991)
992{
993  _Per_CPU_Acquire( cpu );
994
995  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
996    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
997    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
998  }
999
1000  _Per_CPU_Release( cpu );
1001}
1002#endif
1003
1004RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
1005  const Thread_Control *the_thread
1006)
1007{
1008#if defined(RTEMS_SMP)
1009  return the_thread->Scheduler.home_scheduler;
1010#else
1011  (void) the_thread;
1012  return &_Scheduler_Table[ 0 ];
1013#endif
1014}
1015
1016RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
1017  const Thread_Control *the_thread
1018)
1019{
1020#if defined(RTEMS_SMP)
1021  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
1022  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
1023    _Chain_First( &the_thread->Scheduler.Wait_nodes )
1024  );
1025#else
1026  return the_thread->Scheduler.nodes;
1027#endif
1028}
1029
1030RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
1031  const Thread_Control *the_thread,
1032  size_t                scheduler_index
1033)
1034{
1035#if defined(RTEMS_SMP)
1036  return (Scheduler_Node *)
1037    ( (uintptr_t) the_thread->Scheduler.nodes
1038      + scheduler_index * _Scheduler_Node_size );
1039#else
1040  _Assert( scheduler_index == 0 );
1041  (void) scheduler_index;
1042  return the_thread->Scheduler.nodes;
1043#endif
1044}
1045
1046#if defined(RTEMS_SMP)
1047RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
1048  Thread_Control   *the_thread,
1049  ISR_lock_Context *lock_context
1050)
1051{
1052  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
1053}
1054
/**
 * @brief Releases the thread scheduler state lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context of the corresponding
 *   _Thread_Scheduler_acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
}
1062
1063void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1064
/**
 * @brief Registers a scheduler node add or remove request for the thread.
 *
 * The requests are queued in the thread's scheduler request list and later
 * carried out by _Thread_Scheduler_process_requests().  An add request that
 * meets a still pending remove request (or vice versa) cancels out to a
 * SCHEDULER_NODE_REQUEST_NOTHING placeholder instead of queuing the node a
 * second time.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node affected by the request.
 * @param[in] request The request, SCHEDULER_NODE_REQUEST_ADD or
 *   SCHEDULER_NODE_REQUEST_REMOVE.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
  Thread_Control         *the_thread,
  Scheduler_Node         *scheduler_node,
  Scheduler_Node_request  request
)
{
  ISR_lock_Context       lock_context;
  Scheduler_Node_request current_request;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  current_request = scheduler_node->Thread.request;

  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
    /* First request for this node: push it onto the request list */
    _Assert(
      request == SCHEDULER_NODE_REQUEST_ADD
        || request == SCHEDULER_NODE_REQUEST_REMOVE
    );
    _Assert( scheduler_node->Thread.next_request == NULL );
    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
    the_thread->Scheduler.requests = scheduler_node;
  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
    /* An opposite request is already pending: the two cancel out */
    _Assert(
      ( current_request == SCHEDULER_NODE_REQUEST_ADD
        && request == SCHEDULER_NODE_REQUEST_REMOVE )
      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
        && request == SCHEDULER_NODE_REQUEST_ADD )
    );
    request = SCHEDULER_NODE_REQUEST_NOTHING;
  }

  scheduler_node->Thread.request = request;

  _Thread_Scheduler_release_critical( the_thread, &lock_context );
}
1100
1101RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
1102  Thread_Control *the_thread,
1103  Scheduler_Node *scheduler_node
1104)
1105{
1106  _Chain_Append_unprotected(
1107    &the_thread->Scheduler.Wait_nodes,
1108    &scheduler_node->Thread.Wait_node
1109  );
1110  _Thread_Scheduler_add_request(
1111    the_thread,
1112    scheduler_node,
1113    SCHEDULER_NODE_REQUEST_ADD
1114  );
1115}
1116
1117RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
1118  Thread_Control *the_thread,
1119  Scheduler_Node *scheduler_node
1120)
1121{
1122  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
1123  _Thread_Scheduler_add_request(
1124    the_thread,
1125    scheduler_node,
1126    SCHEDULER_NODE_REQUEST_REMOVE
1127  );
1128}
1129#endif
1130
1131/**
1132 * @brief Returns the priority of the thread.
1133 *
1134 * Returns the user API and thread wait information relevant thread priority.
1135 * This includes temporary thread priority adjustments due to locking
1136 * protocols, a job release or the POSIX sporadic server for example.
1137 *
1138 * @return The priority of the thread.
1139 */
1140RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1141  const Thread_Control *the_thread
1142)
1143{
1144  Scheduler_Node *scheduler_node;
1145
1146  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1147  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1148}
1149
/**
 * @brief Acquires the thread wait default lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 *   release.
 *
 * @see _Thread_Wait_release_default_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
}
1167
/**
 * @brief Acquires the thread wait default lock and returns the executing
 * thread.
 *
 * @param[in] lock_context The lock context used for the corresponding lock
 *   release.
 *
 * @return The executing thread.
 *
 * @see _Thread_Wait_release_default().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  /* Read the executing thread only after interrupts are disabled */
  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_Wait_acquire_default_critical( executing, lock_context );

  return executing;
}
1191
/**
 * @brief Acquires the thread wait default lock and disables interrupts.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 *   release.
 *
 * @see _Thread_Wait_release_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
}
1209
/**
 * @brief Releases the thread wait default lock inside a critical section
 * (interrupts disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 *   acquire.
 *
 * @see _Thread_Wait_acquire_default_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
}
1227
/**
 * @brief Releases the thread wait default lock and restores the previous
 * interrupt status.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 *   acquire.
 *
 * @see _Thread_Wait_acquire_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Wait_release_default_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
1244
1245#if defined(RTEMS_SMP)
1246#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1247  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1248
/**
 * @brief Removes the request gate from the thread's pending request chain and
 * opens the next pending gate, if any.
 *
 * The caller must own the thread wait default lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_lock_context The lock context containing the gate to
 *   remove.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
  Thread_Control            *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
  Chain_Node *first;

  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );

  /* Wake the next pending request, which now heads the chain */
  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
  }
}
1263
/**
 * @brief Acquires the thread queue lock inside a critical section (interrupts
 * disabled).
 *
 * Lock statistics are accounted to the executing thread.
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The lock context used for the corresponding
 *   _Thread_Wait_release_queue_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_acquire_critical(
    queue,
    &_Thread_Executing->Potpourri_stats,
    &queue_lock_context->Lock_context
  );
}
1275
/**
 * @brief Releases the thread queue lock inside a critical section (interrupts
 * disabled).
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The lock context of the corresponding
 *   _Thread_Wait_acquire_queue_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_release_critical(
    queue,
    &queue_lock_context->Lock_context
  );
}
1286#endif
1287
/**
 * @brief Acquires the thread wait lock inside a critical section (interrupts
 * disabled).
 *
 * On SMP configurations, this acquires the default lock and, if the thread
 * currently waits on a thread queue, additionally the queue lock via a gate
 * request so that concurrent queue surrender operations are serialized.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_context The thread queue context for the corresponding
 *   _Thread_Wait_release_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  _Thread_Wait_acquire_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );

  /* Snapshot the current wait queue under the default lock */
  queue = the_thread->Wait.queue;
  queue_context->Lock_context.Wait.queue = queue;

  if ( queue != NULL ) {
    /* Register our request before dropping the default lock */
    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &queue_context->Lock_context.Wait.Gate
    );
    _Thread_Wait_release_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );

    /*
     * A NULL queue in our lock context indicates the wait queue changed
     * while we acquired the queue lock; fall back to the default lock only.
     */
    if ( queue_context->Lock_context.Wait.queue == NULL ) {
      _Thread_Wait_release_queue_critical(
        queue,
        &queue_context->Lock_context
      );
      _Thread_Wait_acquire_default_critical(
        the_thread,
        &queue_context->Lock_context.Lock_context
      );
      _Thread_Wait_remove_request_locked(
        the_thread,
        &queue_context->Lock_context
      );
      _Assert( the_thread->Wait.queue == NULL );
    }
  }
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
1344
/**
 * @brief Disables interrupts and acquires the thread wait lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_context The thread queue context for the corresponding
 *   _Thread_Wait_release().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
  _Thread_Wait_acquire_critical( the_thread, queue_context );
}
1360
/**
 * @brief Releases the thread wait lock inside a critical section (interrupts
 * disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_context The thread queue context used for corresponding
 *   _Thread_Wait_acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  queue = queue_context->Lock_context.Wait.queue;

  if ( queue != NULL ) {
    /*
     * Undo the queue lock acquisition of _Thread_Wait_acquire_critical():
     * release the queue lock, then remove our gate request under the
     * default lock.
     */
    _Thread_Wait_release_queue_critical(
      queue, &queue_context->Lock_context
    );
    _Thread_Wait_acquire_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_remove_request_locked(
      the_thread,
      &queue_context->Lock_context
    );
  }

  _Thread_Wait_release_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
1404
/**
 * @brief Releases the thread wait lock and restores the previous interrupt
 * status.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_context The thread queue context used for corresponding
 *   _Thread_Wait_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  _Thread_Wait_release_critical( the_thread, queue_context );
  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
}
1421
/**
 * @brief Claims the thread wait queue.
 *
 * The caller must not be the owner of the default thread wait lock.  The
 * caller must be the owner of the corresponding thread queue lock.  The
 * registration of the corresponding thread queue operations is deferred and
 * done after the deadlock detection.  This is crucial to support timeouts on
 * SMP configurations.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue The new thread queue.
 *
 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  ISR_lock_Context lock_context;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  _Assert( the_thread->Wait.queue == NULL );

#if defined(RTEMS_SMP)
  /*
   * Start a fresh request chain and close the tranquilizer gate; it is
   * opened again by _Thread_Wait_restore_default().
   */
  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
#endif

  the_thread->Wait.queue = queue;

  _Thread_Wait_release_default_critical( the_thread, &lock_context );
}
1457
/**
 * @brief Finalizes the thread wait queue claim via registration of the
 * corresponding thread queue operations.
 *
 * @param[in] the_thread The thread.
 * @param[in] operations The corresponding thread queue operations.
 *
 * @see _Thread_Wait_claim().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *operations
)
{
  the_thread->Wait.operations = operations;
}
1472
/**
 * @brief Removes a thread wait lock request.
 *
 * On SMP configurations, removes a thread wait lock request while holding
 * the thread wait default lock.
 *
 * On other configurations, this function does nothing.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_lock_context The thread queue lock context used for
 *   corresponding _Thread_Wait_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
  Thread_Control            *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context lock_context;

  _Thread_Wait_acquire_default( the_thread, &lock_context );
  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
  _Thread_Wait_release_default( the_thread, &lock_context );
#else
  (void) the_thread;
  (void) queue_lock_context;
#endif
}
1500
/**
 * @brief Restores the default thread wait queue and operations.
 *
 * The caller must be the owner of the current thread wait queue lock.
 *
 * On SMP configurations, the pending requests are updated to use the stale
 * thread queue operations.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Thread_Wait_claim().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context  lock_context;
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );

  if ( node != tail ) {
    /*
     * Mark every pending request as stale (no wait queue), then append the
     * tranquilizer gate so it opens once all requests are processed.
     */
    do {
      Thread_queue_Context *queue_context;

      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
      queue_context->Lock_context.Wait.queue = NULL;

      node = _Chain_Next( node );
    } while ( node != tail );

    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &the_thread->Wait.Lock.Tranquilizer
    );
  } else {
    /* No pending requests: the thread may tranquilize immediately */
    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
  }
#endif

  the_thread->Wait.queue = NULL;
  the_thread->Wait.operations = &_Thread_queue_Operations_default;

#if defined(RTEMS_SMP)
  _Thread_Wait_release_default_critical( the_thread, &lock_context );
#endif
}
1553
/**
 * @brief Tranquilizes the thread after a wait on a thread queue.
 *
 * After the violent blocking procedure this function makes the thread calm and
 * peaceful again so that it can carry out its normal work.
 *
 * On SMP configurations, ensures that all pending thread wait lock requests
 * completed before the thread is able to begin a new thread wait procedure.
 *
 * On other configurations, this function does nothing.
 *
 * It must be called after a _Thread_Wait_claim() exactly once
 *  - after the corresponding thread queue lock was released, and
 *  - the default wait state is restored or some other processor is about to do
 *    this.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  /* Blocks until _Thread_Wait_restore_default() opened the gate */
  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
#else
  (void) the_thread;
#endif
}
1582
/**
 * @brief Cancels a thread wait on a thread queue.
 *
 * Extracts the thread via the registered thread queue operations and restores
 * the default thread wait state.  On SMP configurations, this is done only if
 * a wait queue is present, and the queue is stored back into the lock context
 * for the subsequent _Thread_Wait_release().
 *
 * @param[in] the_thread The thread.
 * @param[in] queue_context The thread queue context used for corresponding
 *   _Thread_Wait_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Queue *queue;

  queue = the_thread->Wait.queue;

  /*
   * Note: the braces of this if statement deliberately span the #if blocks;
   * on non-SMP configurations the extract operation runs unconditionally.
   */
#if defined(RTEMS_SMP)
  if ( queue != NULL ) {
    _Assert( queue_context->Lock_context.Wait.queue == queue );
#endif

    ( *the_thread->Wait.operations->extract )(
      queue,
      the_thread,
      queue_context
    );
    _Thread_Wait_restore_default( the_thread );

#if defined(RTEMS_SMP)
    _Assert( queue_context->Lock_context.Wait.queue == NULL );
    queue_context->Lock_context.Wait.queue = queue;
  }
#endif
}
1617
1618/**
1619 * @brief The initial thread wait flags value set by _Thread_Initialize().
1620 */
1621#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1622
1623/**
1624 * @brief Mask to get the thread wait state flags.
1625 */
1626#define THREAD_WAIT_STATE_MASK 0xffU
1627
1628/**
1629 * @brief Indicates that the thread begins with the blocking operation.
1630 *
1631 * A blocking operation consists of an optional watchdog initialization and the
1632 * setting of the appropriate thread blocking state with the corresponding
1633 * scheduler block operation.
1634 */
1635#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1636
1637/**
1638 * @brief Indicates that the thread completed the blocking operation.
1639 */
1640#define THREAD_WAIT_STATE_BLOCKED 0x2U
1641
1642/**
1643 * @brief Indicates that a condition to end the thread wait occurred.
1644 *
1645 * This could be a timeout, a signal, an event or a resource availability.
1646 */
1647#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1648
1649/**
1650 * @brief Mask to get the thread wait class flags.
1651 */
1652#define THREAD_WAIT_CLASS_MASK 0xff00U
1653
1654/**
1655 * @brief Indicates that the thread waits for an event.
1656 */
1657#define THREAD_WAIT_CLASS_EVENT 0x100U
1658
1659/**
1660 * @brief Indicates that the thread waits for a system event.
1661 */
1662#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1663
1664/**
1665 * @brief Indicates that the thread waits for an object.
1666 */
1667#define THREAD_WAIT_CLASS_OBJECT 0x400U
1668
1669/**
1670 * @brief Indicates that the thread waits for a period.
1671 */
1672#define THREAD_WAIT_CLASS_PERIOD 0x800U
1673
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations, this is an atomic store with relaxed memory order.
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The new thread wait flags.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
1685
/**
 * @brief Returns the thread wait flags.
 *
 * On SMP configurations, this is an atomic load with relaxed memory order.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
1696
/**
 * @brief Returns the thread wait flags with acquire semantics.
 *
 * On SMP configurations, this is an atomic load with acquire memory order.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
#else
  return the_thread->Wait.flags;
#endif
}
1707
/**
 * @brief Tries to change the thread wait flags with release semantics in case
 * of success.
 *
 * Must be called inside a critical section (interrupts disabled).
 *
 * In case the wait flags are equal to the expected wait flags, then the wait
 * flags are set to the desired wait flags.
 *
 * @param[in] the_thread The thread.
 * @param[in] expected_flags The expected wait flags.
 * @param[in] desired_flags The desired wait flags.
 *
 * @retval true The wait flags were equal to the expected wait flags.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  /* Enforce the critical section precondition */
  _Assert( _ISR_Get_level() != 0 );

#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_RELEASE,
    ATOMIC_ORDER_RELAXED
  );
#else
  /* On uniprocessor, disabled interrupts make the compare and swap atomic */
  bool success = ( the_thread->Wait.flags == expected_flags );

  if ( success ) {
    the_thread->Wait.flags = desired_flags;
  }

  return success;
#endif
}
1750
/**
 * @brief Tries to change the thread wait flags with acquire semantics.
 *
 * In case the wait flags are equal to the expected wait flags, then the wait
 * flags are set to the desired wait flags.
 *
 * @param[in] the_thread The thread.
 * @param[in] expected_flags The expected wait flags.
 * @param[in] desired_flags The desired wait flags.
 *
 * @retval true The wait flags were equal to the expected wait flags.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_ACQUIRE,
    ATOMIC_ORDER_ACQUIRE
  );
#else
  bool      success;
  ISR_Level level;

  /* Provide the critical section required by the release variant */
  _ISR_Local_disable( level );

  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    expected_flags,
    desired_flags
  );

  _ISR_Local_enable( level );
  return success;
#endif
}
1794
1795/**
1796 * @brief Returns the object identifier of the object containing the current
1797 * thread wait queue.
1798 *
1799 * This function may be used for debug and system information purposes.  The
1800 * caller must be the owner of the thread lock.
1801 *
1802 * @retval 0 The thread waits on no thread queue currently, the thread wait
1803 *   queue is not contained in an object, or the current thread state provides
1804 *   insufficient information, e.g. the thread is in the middle of a blocking
1805 *   operation.
1806 * @retval other The object identifier of the object containing the thread wait
1807 *   queue.
1808 */
1809Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1810
/**
 * @brief Returns the thread wait return code as a status code.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait return code converted to a status code.
 */
RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
  const Thread_Control *the_thread
)
{
  return (Status_Control) the_thread->Wait.return_code;
}
1817
1818/**
1819 * @brief Cancels a blocking operation so that the thread can continue its
1820 * execution.
1821 *
1822 * In case this function actually cancelled the blocking operation, then the
1823 * thread wait return code is set to the specified status.
1824 *
1825 * A specialization of this function is _Thread_Timeout().
1826 *
1827 * @param[in] the_thread The thread.
1828 * @param[in] status The thread wait status.
1829 */
1830void _Thread_Continue( Thread_Control *the_thread, Status_Control status );
1831
1832/**
1833 * @brief General purpose thread wait timeout.
1834 *
1835 * @param[in] the_watchdog The thread timer watchdog.
1836 */
1837void _Thread_Timeout( Watchdog_Control *the_watchdog );
1838
/**
 * @brief Initializes the thread timer information.
 *
 * The watchdog header initially refers to the ticks header of the specified
 * processor.
 *
 * @param[in] timer The thread timer information to initialize.
 * @param[in] cpu The processor providing the initial watchdog header.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
1848
/**
 * @brief Adds a timeout in ticks for the thread using _Thread_Timeout() as
 * the watchdog routine.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the ticks watchdog header.
 * @param[in] ticks The timeout interval in ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_timeout_ticks(
  Thread_Control    *the_thread,
  Per_CPU_Control   *cpu,
  Watchdog_Interval  ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  /* Record the header so _Thread_Timer_remove() targets the right one */
  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
  the_thread->Timer.Watchdog.routine = _Thread_Timeout;
  _Watchdog_Per_CPU_insert_ticks( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1866
/**
 * @brief Inserts the thread timer into the realtime watchdog header of the
 * processor with the specified routine and absolute expiration time.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the realtime watchdog header.
 * @param[in] routine The watchdog service routine to invoke on expiry.
 * @param[in] expire The absolute expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_realtime(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context  lock_context;
  Watchdog_Header  *header;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
  the_thread->Timer.header = header;
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert( &the_thread->Timer.Watchdog, cpu, header, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1886
/**
 * @brief Removes the thread timer watchdog from its current header.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    /* Use the processor on which the watchdog was inserted */
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1905
/**
 * @brief Tranquilizes the thread, removes its timer, and unblocks it.
 *
 * On multiprocessing configurations, remote thread proxies are unblocked via
 * the thread queue proxy unblock.
 *
 * @param[in] the_thread The thread to unblock.
 * @param[in] queue The thread queue the thread waited on; only used for
 *   multiprocessing configurations.
 */
RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
1925
1926Status_Control _Thread_Set_name(
1927  Thread_Control *the_thread,
1928  const char     *name
1929);
1930
1931size_t _Thread_Get_name(
1932  const Thread_Control *the_thread,
1933  char                 *buffer,
1934  size_t                buffer_size
1935);
1936
1937#if defined(RTEMS_SMP)
1938#define THREAD_PIN_STEP 2
1939
1940#define THREAD_PIN_PREEMPTION 1
1941
1942void _Thread_Do_unpin(
1943  Thread_Control  *executing,
1944  Per_CPU_Control *cpu_self
1945);
1946#endif
1947
/**
 * @brief Pins the executing thread.
 *
 * On SMP configurations, the pin level is incremented by THREAD_PIN_STEP;
 * pin requests nest.  On other configurations, this function does nothing.
 *
 * @param[in] executing The executing thread.
 *
 * @see _Thread_Unpin().
 */
RTEMS_INLINE_ROUTINE void _Thread_Pin( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  _Assert( executing == _Thread_Executing );

  executing->Scheduler.pin_level += THREAD_PIN_STEP;
#else
  (void) executing;
#endif
}
1958
/**
 * @brief Unpins the executing thread.
 *
 * On SMP configurations, the pin level is decremented by THREAD_PIN_STEP;
 * the slow path _Thread_Do_unpin() is taken only for the last unpin with the
 * preemption flag set.  On other configurations, this function does nothing.
 *
 * @param[in] executing The executing thread.
 * @param[in] cpu_self The processor of the executing thread.
 *
 * @see _Thread_Pin().
 */
RTEMS_INLINE_ROUTINE void _Thread_Unpin(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self
)
{
#if defined(RTEMS_SMP)
  unsigned int pin_level;

  _Assert( executing == _Thread_Executing );

  pin_level = executing->Scheduler.pin_level;
  _Assert( pin_level > 0 );

  /* Fast path: this is not the final unpin with a pending preemption */
  if (
    RTEMS_PREDICT_TRUE(
      pin_level != ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION )
    )
  ) {
    executing->Scheduler.pin_level = pin_level - THREAD_PIN_STEP;
  } else {
    _Thread_Do_unpin( executing, cpu_self );
  }
#else
  (void) executing;
  (void) cpu_self;
#endif
}
1986
1987/** @}*/
1988
1989#ifdef __cplusplus
1990}
1991#endif
1992
1993#if defined(RTEMS_MULTIPROCESSING)
1994#include <rtems/score/threadmp.h>
1995#endif
1996
1997#endif
1998/* end of include file */
Note: See TracBrowser for help on using the repository browser.