source: rtems/cpukit/include/rtems/score/threadimpl.h @ ef23838

Last change on this file since ef23838 was ef23838, checked in by Sebastian Huber <sebastian.huber@…>, on Dec 3, 2018 at 12:10:21 PM

score: Avoid sbintime_t in API headers

The sbintime_t is a non-POSIX type and not visible if strict standard
options are selected.

Move implementation details from <rtems/score/timestamp.h> to
<rtems/score/timestampimpl.h>.

Update #3598.

  • Property mode set to 100644
File size: 52.3 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2017 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/timestampimpl.h>
35#include <rtems/score/threadqimpl.h>
36#include <rtems/score/todimpl.h>
37#include <rtems/score/freechain.h>
38#include <rtems/score/watchdogimpl.h>
39#include <rtems/config.h>
40
41#ifdef __cplusplus
42extern "C" {
43#endif
44
45/**
46 * @addtogroup ScoreThread
47 */
48/**@{**/
49
/**
 *  The following constant is the status code used to indicate that a
 *  thread is blocked waiting for a resource via a proxy.
 */
54#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
55
56/**
57 *  Self for the GNU Ada Run-Time
58 */
59extern void *rtems_ada_self;
60
61typedef struct {
62  Objects_Information Objects;
63
64  Freechain_Control Free_thread_queue_heads;
65} Thread_Information;
66
67/**
68 *  The following defines the information control block used to
69 *  manage this class of objects.
70 */
71extern Thread_Information _Thread_Internal_information;
72
73/**
74 * @brief Object identifier of the global constructor thread.
75 *
76 * This variable is set by _RTEMS_tasks_Initialize_user_tasks_body() or
77 * _POSIX_Threads_Initialize_user_threads_body().
78 *
79 * It is consumed by _Thread_Handler().
80 */
81extern Objects_Id _Thread_Global_constructor;
82
83/**
84 *  The following points to the thread whose floating point
85 *  context is currently loaded.
86 */
87#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
88extern Thread_Control *_Thread_Allocated_fp;
89#endif
90
91#if defined(RTEMS_SMP)
92#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
93  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
94#endif
95
96typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
97
98void _Thread_Iterate(
99  Thread_Visitor  visitor,
100  void           *arg
101);
102
103void _Thread_Initialize_information(
104  Thread_Information  *information,
105  Objects_APIs         the_api,
106  uint16_t             the_class,
107  uint32_t             maximum
108);
109
110/**
111 *  @brief Initialize thread handler.
112 *
113 *  This routine performs the initialization necessary for this handler.
114 */
115void _Thread_Handler_initialization(void);
116
117/**
118 *  @brief Create idle thread.
119 *
120 *  This routine creates the idle thread.
121 *
122 *  @warning No thread should be created before this one.
123 */
124void _Thread_Create_idle(void);
125
126/**
127 *  @brief Start thread multitasking.
128 *
129 *  This routine initiates multitasking.  It is invoked only as
130 *  part of initialization and its invocation is the last act of
131 *  the non-multitasking part of the system initialization.
132 */
133void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
134
135/**
136 *  @brief Allocate the requested stack space for the thread.
137 *
138 *  Allocate the requested stack space for the thread.
139 *  Set the Start.stack field to the address of the stack.
140 *
141 *  @param[in] the_thread is the thread where the stack space is requested
142 *  @param[in] stack_size is the stack space is requested
143 *
144 *  @retval actual size allocated after any adjustment
145 *  @retval zero if the allocation failed
146 */
147size_t _Thread_Stack_Allocate(
148  Thread_Control *the_thread,
149  size_t          stack_size
150);
151
152/**
153 *  @brief Deallocate thread stack.
154 *
155 *  Deallocate the Thread's stack.
156 */
157void _Thread_Stack_Free(
158  Thread_Control *the_thread
159);
160
161/**
162 *  @brief Initialize thread.
163 *
164 *  This routine initializes the specified the thread.  It allocates
165 *  all memory associated with this thread.  It completes by adding
166 *  the thread to the local object table so operations on this
167 *  thread id are allowed.
168 *
169 *  @note If stack_area is NULL, it is allocated from the workspace.
170 *
171 *  @note If the stack is allocated from the workspace, then it is
172 *        guaranteed to be of at least minimum size.
173 */
174bool _Thread_Initialize(
175  Thread_Information                   *information,
176  Thread_Control                       *the_thread,
177  const struct _Scheduler_Control      *scheduler,
178  void                                 *stack_area,
179  size_t                                stack_size,
180  bool                                  is_fp,
181  Priority_Control                      priority,
182  bool                                  is_preemptible,
183  Thread_CPU_budget_algorithms          budget_algorithm,
184  Thread_CPU_budget_algorithm_callout   budget_callout,
185  uint32_t                              isr_level,
186  Objects_Name                          name
187);
188
189/**
190 *  @brief Initializes thread and executes it.
191 *
192 *  This routine initializes the executable information for a thread
193 *  and makes it ready to execute.  After this routine executes, the
194 *  thread competes with all other threads for CPU time.
195 *
196 *  @param the_thread The thread to be started.
197 *  @param entry The thread entry information.
198 */
199bool _Thread_Start(
200  Thread_Control                 *the_thread,
201  const Thread_Entry_information *entry,
202  ISR_lock_Context               *lock_context
203);
204
205void _Thread_Restart_self(
206  Thread_Control                 *executing,
207  const Thread_Entry_information *entry,
208  ISR_lock_Context               *lock_context
209) RTEMS_NO_RETURN;
210
211bool _Thread_Restart_other(
212  Thread_Control                 *the_thread,
213  const Thread_Entry_information *entry,
214  ISR_lock_Context               *lock_context
215);
216
217void _Thread_Yield( Thread_Control *executing );
218
219Thread_Life_state _Thread_Change_life(
220  Thread_Life_state clear,
221  Thread_Life_state set,
222  Thread_Life_state ignore
223);
224
225Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
226
227/**
228 * @brief Kills all zombie threads in the system.
229 *
230 * Threads change into the zombie state as the last step in the thread
231 * termination sequence right before a context switch to the heir thread is
232 * initiated.  Since the thread stack is still in use during this phase we have
233 * to postpone the thread stack reclamation until this point.  On SMP
234 * configurations we may have to busy wait for context switch completion here.
235 */
236void _Thread_Kill_zombies( void );
237
238void _Thread_Exit(
239  Thread_Control    *executing,
240  Thread_Life_state  set,
241  void              *exit_value
242);
243
244void _Thread_Join(
245  Thread_Control       *the_thread,
246  States_Control        waiting_for_join,
247  Thread_Control       *executing,
248  Thread_queue_Context *queue_context
249);
250
251void _Thread_Cancel(
252  Thread_Control *the_thread,
253  Thread_Control *executing,
254  void           *exit_value
255);
256
257typedef struct {
258  Thread_queue_Context  Base;
259  Thread_Control       *cancel;
260} Thread_Close_context;
261
262/**
263 * @brief Closes the thread.
264 *
265 * Closes the thread object and starts the thread termination sequence.  In
266 * case the executing thread is not terminated, then this function waits until
267 * the terminating thread reached the zombie state.
268 */
269void _Thread_Close(
270  Thread_Control       *the_thread,
271  Thread_Control       *executing,
272  Thread_Close_context *context
273);
274
275RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
276{
277  return _States_Is_ready( the_thread->current_state );
278}
279
280States_Control _Thread_Clear_state_locked(
281  Thread_Control *the_thread,
282  States_Control  state
283);
284
285/**
286 * @brief Clears the specified thread state.
287 *
288 * In case the previous state is a non-ready state and the next state is the
289 * ready state, then the thread is unblocked by the scheduler.
290 *
291 * @param[in] the_thread The thread.
292 * @param[in] state The state to clear.  It must not be zero.
293 *
294 * @return The previous state.
295 */
296States_Control _Thread_Clear_state(
297  Thread_Control *the_thread,
298  States_Control  state
299);
300
301States_Control _Thread_Set_state_locked(
302  Thread_Control *the_thread,
303  States_Control  state
304);
305
306/**
307 * @brief Sets the specified thread state.
308 *
309 * In case the previous state is the ready state, then the thread is blocked by
310 * the scheduler.
311 *
312 * @param[in] the_thread The thread.
313 * @param[in] state The state to set.  It must not be zero.
314 *
315 * @return The previous state.
316 */
317States_Control _Thread_Set_state(
318  Thread_Control *the_thread,
319  States_Control  state
320);
321
322/**
 *  @brief Initializes the environment for a thread.
324 *
325 *  This routine initializes the context of @a the_thread to its
326 *  appropriate starting state.
327 *
328 *  @param[in] the_thread is the pointer to the thread control block.
329 */
330void _Thread_Load_environment(
331  Thread_Control *the_thread
332);
333
334void _Thread_Entry_adaptor_idle( Thread_Control *executing );
335
336void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
337
338void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
339
340/**
341 *  @brief Wrapper function for all threads.
342 *
343 *  This routine is the wrapper function for all threads.  It is
344 *  the starting point for all threads.  The user provided thread
345 *  entry point is invoked by this routine.  Operations
346 *  which must be performed immediately before and after the user's
347 *  thread executes are found here.
348 *
349 *  @note On entry, it is assumed all interrupts are blocked and that this
350 *  routine needs to set the initial isr level.  This may or may not
351 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
353 *  setting the initial isr level properly here is safe.
354 */
355void _Thread_Handler( void );
356
/**
 * @brief Acquires the thread state lock with interrupts already disabled.
 *
 * The thread state lock is the lock of the thread join queue.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
}

/**
 * @brief Disables interrupts and acquires the thread state lock.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_State_acquire_critical( the_thread, lock_context );
}

/**
 * @brief Disables interrupts and acquires the state lock of the thread
 * executing on the current processor.
 *
 * @return The executing thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  /* The executing thread is read only after interrupts are disabled */
  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_State_acquire_critical( executing, lock_context );

  return executing;
}

/**
 * @brief Releases the thread state lock without changing the interrupt
 * status.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
}

/**
 * @brief Releases the thread state lock and restores the interrupt status
 * saved in the lock context.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_State_release_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}

#if defined(RTEMS_DEBUG)
/**
 * @brief Returns true if the current context owns the state lock (join
 * queue lock) of the thread, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
  const Thread_Control *the_thread
)
{
  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
}
#endif
412
413/**
414 * @brief Performs the priority actions specified by the thread queue context
415 * along the thread queue path.
416 *
417 * The caller must be the owner of the thread wait lock.
418 *
419 * @param start_of_path The start thread of the thread queue path.
420 * @param queue_context The thread queue context specifying the thread queue
421 *   path and initial thread priority actions.
422 *
423 * @see _Thread_queue_Path_acquire_critical().
424 */
425void _Thread_Priority_perform_actions(
426  Thread_Control       *start_of_path,
427  Thread_queue_Context *queue_context
428);
429
430/**
431 * @brief Adds the specified thread priority node to the corresponding thread
432 * priority aggregation.
433 *
434 * The caller must be the owner of the thread wait lock.
435 *
436 * @param the_thread The thread.
437 * @param priority_node The thread priority node to add.
438 * @param queue_context The thread queue context to return an updated set of
439 *   threads for _Thread_Priority_update().  The thread queue context must be
440 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
441 *   call of this function.
442 *
443 * @see _Thread_Wait_acquire().
444 */
445void _Thread_Priority_add(
446  Thread_Control       *the_thread,
447  Priority_Node        *priority_node,
448  Thread_queue_Context *queue_context
449);
450
451/**
452 * @brief Removes the specified thread priority node from the corresponding
453 * thread priority aggregation.
454 *
455 * The caller must be the owner of the thread wait lock.
456 *
457 * @param the_thread The thread.
458 * @param priority_node The thread priority node to remove.
459 * @param queue_context The thread queue context to return an updated set of
460 *   threads for _Thread_Priority_update().  The thread queue context must be
461 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
462 *   call of this function.
463 *
464 * @see _Thread_Wait_acquire().
465 */
466void _Thread_Priority_remove(
467  Thread_Control       *the_thread,
468  Priority_Node        *priority_node,
469  Thread_queue_Context *queue_context
470);
471
472/**
473 * @brief Propagates a thread priority value change in the specified thread
474 * priority node to the corresponding thread priority aggregation.
475 *
476 * The caller must be the owner of the thread wait lock.
477 *
478 * @param the_thread The thread.
479 * @param priority_node The thread priority node to change.
480 * @param prepend_it In case this is true, then the thread is prepended to
481 *   its priority group in its home scheduler instance, otherwise it is
482 *   appended.
483 * @param queue_context The thread queue context to return an updated set of
484 *   threads for _Thread_Priority_update().  The thread queue context must be
485 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
486 *   call of this function.
487 *
488 * @see _Thread_Wait_acquire().
489 */
490void _Thread_Priority_changed(
491  Thread_Control       *the_thread,
492  Priority_Node        *priority_node,
493  bool                  prepend_it,
494  Thread_queue_Context *queue_context
495);
496
497/**
498 * @brief Changes the thread priority value of the specified thread priority
499 * node in the corresponding thread priority aggregation.
500 *
501 * The caller must be the owner of the thread wait lock.
502 *
503 * @param the_thread The thread.
504 * @param priority_node The thread priority node to change.
505 * @param new_priority The new thread priority value of the thread priority
506 *   node to change.
507 * @param prepend_it In case this is true, then the thread is prepended to
508 *   its priority group in its home scheduler instance, otherwise it is
509 *   appended.
510 * @param queue_context The thread queue context to return an updated set of
511 *   threads for _Thread_Priority_update().  The thread queue context must be
512 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
513 *   call of this function.
514 *
515 * @see _Thread_Wait_acquire().
516 */
RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Priority_Control      new_priority,
  bool                  prepend_it,
  Thread_queue_Context *queue_context
)
{
  /* Store the new priority value in the node first, ... */
  _Priority_Node_set_priority( priority_node, new_priority );
  /* ... then propagate the change through the priority aggregation */
  _Thread_Priority_changed(
    the_thread,
    priority_node,
    prepend_it,
    queue_context
  );
}
533
534/**
535 * @brief Replaces the victim priority node with the replacement priority node
536 * in the corresponding thread priority aggregation.
537 *
538 * The caller must be the owner of the thread wait lock.
539 *
540 * @param the_thread The thread.
541 * @param victim_node The victim thread priority node.
542 * @param replacement_node The replacement thread priority node.
543 *
544 * @see _Thread_Wait_acquire().
545 */
546void _Thread_Priority_replace(
547  Thread_Control *the_thread,
548  Priority_Node  *victim_node,
549  Priority_Node  *replacement_node
550);
551
552/**
553 * @brief Adds a priority node to the corresponding thread priority
554 * aggregation.
555 *
556 * The caller must be the owner of the thread wait lock.
557 *
558 * @param the_thread The thread.
559 * @param priority_node The thread priority node to add.
560 * @param queue_context The thread queue context to return an updated set of
561 *   threads for _Thread_Priority_update().  The thread queue context must be
562 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
563 *   call of this function.
564 *
565 * @see _Thread_Priority_add(), _Thread_Priority_change(),
566 *   _Thread_Priority_changed() and _Thread_Priority_remove().
567 */
568void _Thread_Priority_update( Thread_queue_Context *queue_context );
569
570#if defined(RTEMS_SMP)
571void _Thread_Priority_and_sticky_update(
572  Thread_Control *the_thread,
573  int             sticky_level_change
574);
575#endif
576
/**
 * @brief Returns true if the left thread priority is less than the right
 * thread priority in the intuitive sense of priority and false otherwise.
 *
 * A numerically greater Priority_Control value represents a less important
 * thread, hence the inverted comparison below.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
  Priority_Control left,
  Priority_Control right
)
{
  return left > right;
}
588
589/**
590 * @brief Returns the highest priority of the left and right thread priorities
591 * in the intuitive sense of priority.
592 */
593RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
594  Priority_Control left,
595  Priority_Control right
596)
597{
598  return _Thread_Priority_less_than( left, right ) ? right : left;
599}
600
601RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
602  Objects_Id id
603)
604{
605  uint32_t the_api;
606
607  the_api = _Objects_Get_API( id );
608
609  if ( !_Objects_Is_api_valid( the_api ) ) {
610    return NULL;
611  }
612
613  /*
614   * Threads are always first class :)
615   *
616   * There is no need to validate the object class of the object identifier,
617   * since this will be done by the object get methods.
618   */
619  return _Objects_Information_table[ the_api ][ 1 ];
620}
621
622/**
623 * @brief Gets a thread by its identifier.
624 *
625 * @see _Objects_Get().
626 */
627Thread_Control *_Thread_Get(
628  Objects_Id         id,
629  ISR_lock_Context  *lock_context
630);
631
/**
 * @brief Returns the processor assigned to the thread.
 *
 * On uniprocessor configurations the thread argument is ignored and the
 * one and only processor is returned.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

/**
 * @brief Assigns the processor to the thread.
 *
 * On uniprocessor configurations this is a no-op.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
657
658/**
659 * This function returns true if the_thread is the currently executing
660 * thread, and false otherwise.
661 */
662
663RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
664  const Thread_Control *the_thread
665)
666{
667  return ( the_thread == _Thread_Executing );
668}
669
#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  /* The is-executing indicator lives in the thread context */
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif
685
686/**
687 * This function returns true if the_thread is the heir
688 * thread, and false otherwise.
689 */
690
691RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
692  const Thread_Control *the_thread
693)
694{
695  return ( the_thread == _Thread_Heir );
696}
697
/**
 * @brief Clears all blocking states of the thread.
 *
 * In case the thread becomes ready, the clear operation performs any
 * necessary scheduling operations including the selection of a new heir
 * thread, see _Thread_Clear_state().
 */
RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}
710
/**
 * @brief Returns true if the floating point context of the thread is
 * currently loaded in the floating point unit, and false otherwise.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif
725
/*
 *  If the CPU has hardware floating point, then we must address saving
 *  and restoring it as part of the context switch.
 *
 *  The second conditional compilation section selects the algorithm used
 *  to context switch between floating point tasks.  The deferred algorithm
 *  can be significantly better in a system with few floating point tasks
 *  because it reduces the total number of save and restore FP context
 *  operations.  However, this algorithm can not be used on all CPUs due
 *  to unpredictable use of FP registers by some compilers for integer
 *  operations.
 */

/**
 * @brief Saves the floating point context of the executing thread, unless
 * the deferred floating point switch algorithm is configured.
 */
RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  /* Only threads with a floating point context have state to save */
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}

/**
 * @brief Restores the floating point context of the executing thread.
 *
 * With the deferred algorithm, the currently loaded context is saved and
 * the context of the executing thread is loaded only if the executing
 * thread does not already own the floating point unit.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    /* Evict the currently loaded context before loading the new one */
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}

/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
778
/**
 * @brief Returns true if a thread dispatch is necessary, and false
 * otherwise.
 *
 * NOTE(review): the previous comment claimed this tells whether
 * dispatching is disabled; the code actually reads the thread dispatch
 * necessary indicator.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
788
789/**
790 * This function returns true if the_thread is NULL and false otherwise.
791 */
792
793RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
794  const Thread_Control *the_thread
795)
796{
797  return ( the_thread == NULL );
798}
799
/**
 * @brief Returns true if the code is the status which indicates that a
 * proxy is blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}
811
812RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
813{
814  /* Idle threads */
815  uint32_t maximum_internal_threads =
816    rtems_configuration_get_maximum_processors();
817
818  /* MPCI thread */
819#if defined(RTEMS_MULTIPROCESSING)
820  if ( _System_state_Is_multiprocessing ) {
821    ++maximum_internal_threads;
822  }
823#endif
824
825  return maximum_internal_threads;
826}
827
/**
 * @brief Allocates an internal thread object.
 *
 * Uses the unprotected object allocator, so the caller is responsible for
 * the appropriate synchronization.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}
833
/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * Must be called with interrupts disabled.  The thread dispatch necessary
 * indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Thread_Dispatch_update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  /* Consume the dispatch request and promote the heir to executing */
  heir = cpu_self->heir;
  cpu_self->dispatch_necessary = false;
  cpu_self->executing = heir;

  return heir;
}
857
/**
 * @brief Charges the time elapsed since the last CPU usage update of the
 * processor to the specified thread.
 *
 * The CPU usage timestamp of the processor is advanced to the current
 * uptime as a side-effect.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  last = cpu->cpu_usage_timestamp;
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  /* ran = new usage timestamp - previous usage timestamp */
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}
871
#if defined( RTEMS_SMP )
/**
 * @brief Installs a new heir thread on the specified processor and
 * requests a thread dispatch for it.
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  /* Update the CPU time used of the current heir before replacing it */
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
}
#endif
886
887void _Thread_Get_CPU_time_used(
888  Thread_Control    *the_thread,
889  Timestamp_Control *cpu_time_used
890);
891
/**
 * @brief Initializes the thread action control with an empty chain of
 * actions.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}

/**
 * @brief Initializes the thread action as not pending (node off chain).
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}
905
/**
 * @brief Adds a post switch action to the thread and requests a thread
 * dispatch on the processor of the thread.
 *
 * The caller must own the thread state lock, see _Thread_State_acquire().
 * An action already pending on the chain is not appended again.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;

  /* The post switch actions are protected by the thread state lock */
  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
927
/**
 * @brief Returns true if the restarting flag is set in the thread life
 * state, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

/**
 * @brief Returns true if the terminating flag is set in the thread life
 * state, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

/**
 * @brief Returns true if a life change is allowed, which is the case if
 * neither the protected nor the change deferred flag is set.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
  Thread_Life_state life_state
)
{
  return ( life_state
    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
}

/**
 * @brief Returns true if a life change (restart or termination) is in
 * progress, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state
    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
}

/**
 * @brief Returns true if the thread is joinable (detached flag not set),
 * and false otherwise.
 *
 * The caller must own the thread state lock.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
  const Thread_Control *the_thread
)
{
  _Assert( _Thread_State_is_owner( the_thread ) );
  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
}
965
/**
 * @brief Increments the thread resource count, in case resource counting
 * is enabled (RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT), otherwise does
 * nothing.
 */
RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
  ++the_thread->resource_count;
#else
  (void) the_thread;
#endif
}

/**
 * @brief Decrements the thread resource count, in case resource counting
 * is enabled (RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT), otherwise does
 * nothing.
 */
RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
  --the_thread->resource_count;
#else
  (void) the_thread;
#endif
}
987
988#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
989/**
990 * @brief Returns true if the thread owns resources, and false otherwise.
991 *
992 * Resources are accounted with the Thread_Control::resource_count resource
993 * counter.  This counter is used by mutex objects for example.
994 *
995 * @param[in] the_thread The thread.
996 */
997RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
998  const Thread_Control *the_thread
999)
1000{
1001  return the_thread->resource_count != 0;
1002}
1003#endif
1004
#if defined(RTEMS_SMP)
/**
 * @brief Extracts the scheduler help node of the thread from its chain and
 * marks it off chain, if it is currently on a chain.
 *
 * The extraction is protected by the lock of the specified processor.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  _Per_CPU_Acquire( cpu );

  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
  }

  _Per_CPU_Release( cpu );
}
#endif
1021
/**
 * @brief Returns the home scheduler of the thread.
 *
 * On uniprocessor configurations this is the one and only scheduler.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.home_scheduler;
#else
  (void) the_thread;
  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Returns the scheduler node of the home scheduler of the thread.
 *
 * On SMP configurations this is the first node of the thread wait nodes,
 * which must not be empty.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
    _Chain_First( &the_thread->Scheduler.Wait_nodes )
  );
#else
  return the_thread->Scheduler.nodes;
#endif
}

/**
 * @brief Returns the scheduler node of the thread for the scheduler with
 * the specified index.
 *
 * On SMP configurations the nodes are stored contiguously in steps of
 * _Scheduler_Node_size bytes; on uniprocessor configurations the index
 * must be zero.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
  const Thread_Control *the_thread,
  size_t                scheduler_index
)
{
#if defined(RTEMS_SMP)
  return (Scheduler_Node *)
    ( (uintptr_t) the_thread->Scheduler.nodes
      + scheduler_index * _Scheduler_Node_size );
#else
  _Assert( scheduler_index == 0 );
  (void) scheduler_index;
  return the_thread->Scheduler.nodes;
#endif
}
1063
1064#if defined(RTEMS_SMP)
/**
 * @brief Acquires the scheduler state lock of the thread inside a critical
 * section (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for the corresponding
 *   _Thread_Scheduler_release_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
}
1072
/**
 * @brief Releases the scheduler state lock of the thread inside a critical
 * section (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context of the corresponding
 *   _Thread_Scheduler_acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
}
1080
1081void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1082
/**
 * @brief Records an add or remove request for a scheduler node of the thread.
 *
 * The request is stored in the scheduler node and the node is enqueued on the
 * thread's request list if no request was pending.  A pending request that is
 * the opposite of the new one (add vs. remove) cancels out to
 * SCHEDULER_NODE_REQUEST_NOTHING; the node stays enqueued so that
 * _Thread_Scheduler_process_requests() can clean it up.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node affected by the request.
 * @param[in] request The request, either SCHEDULER_NODE_REQUEST_ADD or
 *   SCHEDULER_NODE_REQUEST_REMOVE.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
  Thread_Control         *the_thread,
  Scheduler_Node         *scheduler_node,
  Scheduler_Node_request  request
)
{
  ISR_lock_Context       lock_context;
  Scheduler_Node_request current_request;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  current_request = scheduler_node->Thread.request;

  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
    /* First request for this node: push it onto the singly-linked list */
    _Assert(
      request == SCHEDULER_NODE_REQUEST_ADD
        || request == SCHEDULER_NODE_REQUEST_REMOVE
    );
    _Assert( scheduler_node->Thread.next_request == NULL );
    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
    the_thread->Scheduler.requests = scheduler_node;
  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
    /* An opposite pending request cancels out with the new one */
    _Assert(
      ( current_request == SCHEDULER_NODE_REQUEST_ADD
        && request == SCHEDULER_NODE_REQUEST_REMOVE )
      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
        && request == SCHEDULER_NODE_REQUEST_ADD )
    );
    request = SCHEDULER_NODE_REQUEST_NOTHING;
  }

  scheduler_node->Thread.request = request;

  _Thread_Scheduler_release_critical( the_thread, &lock_context );
}
1118
/**
 * @brief Appends the scheduler node to the thread wait nodes and records an
 * add request for it.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to add.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Append_unprotected(
    &the_thread->Scheduler.Wait_nodes,
    &scheduler_node->Thread.Wait_node
  );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_ADD
  );
}
1134
/**
 * @brief Extracts the scheduler node from the thread wait nodes and records a
 * remove request for it.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to remove.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_REMOVE
  );
}
1147#endif
1148
1149/**
1150 * @brief Returns the priority of the thread.
1151 *
1152 * Returns the user API and thread wait information relevant thread priority.
1153 * This includes temporary thread priority adjustments due to locking
1154 * protocols, a job release or the POSIX sporadic server for example.
1155 *
1156 * @return The priority of the thread.
1157 */
1158RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1159  const Thread_Control *the_thread
1160)
1161{
1162  Scheduler_Node *scheduler_node;
1163
1164  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1165  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1166}
1167
1168/**
1169 * @brief Acquires the thread wait default lock inside a critical section
1170 * (interrupts disabled).
1171 *
1172 * @param[in] the_thread The thread.
1173 * @param[in] lock_context The lock context used for the corresponding lock
1174 *   release.
1175 *
1176 * @see _Thread_Wait_release_default_critical().
1177 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Interrupts are already disabled by the caller, see function brief above */
  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
}
1185
1186/**
1187 * @brief Acquires the thread wait default lock and returns the executing
1188 * thread.
1189 *
1190 * @param[in] lock_context The lock context used for the corresponding lock
1191 *   release.
1192 *
1193 * @return The executing thread.
1194 *
1195 * @see _Thread_Wait_release_default().
1196 */
1197RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
1198  ISR_lock_Context *lock_context
1199)
1200{
1201  Thread_Control *executing;
1202
1203  _ISR_lock_ISR_disable( lock_context );
1204  executing = _Thread_Executing;
1205  _Thread_Wait_acquire_default_critical( executing, lock_context );
1206
1207  return executing;
1208}
1209
1210/**
1211 * @brief Acquires the thread wait default lock and disables interrupts.
1212 *
1213 * @param[in] the_thread The thread.
1214 * @param[in] lock_context The lock context used for the corresponding lock
1215 *   release.
1216 *
1217 * @see _Thread_Wait_release_default().
1218 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Disable interrupts, then take the default thread wait lock */
  _ISR_lock_ISR_disable( lock_context );
  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
}
1227
1228/**
1229 * @brief Releases the thread wait default lock inside a critical section
1230 * (interrupts disabled).
1231 *
1232 * The previous interrupt status is not restored.
1233 *
1234 * @param[in] the_thread The thread.
1235 * @param[in] lock_context The lock context used for the corresponding lock
1236 *   acquire.
1237 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Counterpart of _Thread_Wait_acquire_default_critical() */
  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
}
1245
1246/**
1247 * @brief Releases the thread wait default lock and restores the previous
1248 * interrupt status.
1249 *
1250 * @param[in] the_thread The thread.
1251 * @param[in] lock_context The lock context used for the corresponding lock
1252 *   acquire.
1253 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Release the default lock, then restore the saved interrupt status */
  _Thread_Wait_release_default_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
1262
1263#if defined(RTEMS_SMP)
1264#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1265  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1266
1267RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1268  Thread_Control            *the_thread,
1269  Thread_queue_Lock_context *queue_lock_context
1270)
1271{
1272  Chain_Node *first;
1273
1274  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1275  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1276
1277  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1278    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1279  }
1280}
1281
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  /*
   * Lock statistics are accounted to the executing thread's Potpourri_stats,
   * since this acquire is not performed on behalf of a dedicated lock object.
   */
  _Thread_queue_Queue_acquire_critical(
    queue,
    &_Thread_Executing->Potpourri_stats,
    &queue_lock_context->Lock_context
  );
}
1293
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  /* Counterpart of _Thread_Wait_acquire_queue_critical() */
  _Thread_queue_Queue_release_critical(
    queue,
    &queue_lock_context->Lock_context
  );
}
1304#endif
1305
1306/**
1307 * @brief Acquires the thread wait lock inside a critical section (interrupts
1308 * disabled).
1309 *
1310 * @param[in] the_thread The thread.
1311 * @param[in] queue_context The thread queue context for the corresponding
1312 *   _Thread_Wait_release_critical().
1313 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  _Thread_Wait_acquire_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );

  /* Snapshot the current thread wait queue under the default lock */
  queue = the_thread->Wait.queue;
  queue_context->Lock_context.Wait.queue = queue;

  if ( queue != NULL ) {
    /*
     * Register our gate as a pending request, then swap the default lock
     * for the actual thread queue lock.
     */
    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &queue_context->Lock_context.Wait.Gate
    );
    _Thread_Wait_release_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );

    if ( queue_context->Lock_context.Wait.queue == NULL ) {
      /*
       * The wait queue was detached while we chased the lock (our request
       * was marked stale, see _Thread_Wait_restore_default()).  Back out:
       * drop the queue lock, re-acquire the default lock, and remove our
       * pending request.
       */
      _Thread_Wait_release_queue_critical(
        queue,
        &queue_context->Lock_context
      );
      _Thread_Wait_acquire_default_critical(
        the_thread,
        &queue_context->Lock_context.Lock_context
      );
      _Thread_Wait_remove_request_locked(
        the_thread,
        &queue_context->Lock_context
      );
      _Assert( the_thread->Wait.queue == NULL );
    }
  }
#else
  /* Interrupt disabling by the caller is sufficient on uniprocessor */
  (void) the_thread;
  (void) queue_context;
#endif
}
1362
1363/**
1364 * @brief Acquires the thread wait default lock and disables interrupts.
1365 *
1366 * @param[in] the_thread The thread.
1367 * @param[in] queue_context The thread queue context for the corresponding
1368 *   _Thread_Wait_release().
1369 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  /* Disable interrupts, then perform the SMP-aware wait lock acquire */
  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
  _Thread_Wait_acquire_critical( the_thread, queue_context );
}
1378
1379/**
1380 * @brief Releases the thread wait lock inside a critical section (interrupts
1381 * disabled).
1382 *
1383 * The previous interrupt status is not restored.
1384 *
1385 * @param[in] the_thread The thread.
1386 * @param[in] queue_context The thread queue context used for corresponding
1387 *   _Thread_Wait_acquire_critical().
1388 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  /* Use the queue recorded by _Thread_Wait_acquire_critical() */
  queue = queue_context->Lock_context.Wait.queue;

  if ( queue != NULL ) {
    /*
     * We hold the thread queue lock: drop it, re-acquire the default lock,
     * and withdraw the pending request registered during the acquire.
     */
    _Thread_Wait_release_queue_critical(
      queue, &queue_context->Lock_context
    );
    _Thread_Wait_acquire_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_remove_request_locked(
      the_thread,
      &queue_context->Lock_context
    );
  }

  _Thread_Wait_release_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
1422
1423/**
1424 * @brief Releases the thread wait lock and restores the previous interrupt
1425 * status.
1426 *
1427 * @param[in] the_thread The thread.
1428 * @param[in] queue_context The thread queue context used for corresponding
1429 *   _Thread_Wait_acquire().
1430 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  /* Release the wait lock, then restore the saved interrupt status */
  _Thread_Wait_release_critical( the_thread, queue_context );
  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
}
1439
1440/**
1441 * @brief Claims the thread wait queue.
1442 *
1443 * The caller must not be the owner of the default thread wait lock.  The
1444 * caller must be the owner of the corresponding thread queue lock.  The
1445 * registration of the corresponding thread queue operations is deferred and
1446 * done after the deadlock detection.  This is crucial to support timeouts on
1447 * SMP configurations.
1448 *
1449 * @param[in] the_thread The thread.
1450 * @param[in] queue The new thread queue.
1451 *
1452 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
1453 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  ISR_lock_Context lock_context;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  /* A new queue may only be claimed when no queue is set */
  _Assert( the_thread->Wait.queue == NULL );

#if defined(RTEMS_SMP)
  /*
   * Reset the pending request chain and close the tranquilizer gate; the
   * gate is opened again by _Thread_Wait_restore_default() once all
   * pending requests completed.
   */
  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
#endif

  the_thread->Wait.queue = queue;

  _Thread_Wait_release_default_critical( the_thread, &lock_context );
}
1475
1476/**
1477 * @brief Finalizes the thread wait queue claim via registration of the
1478 * corresponding thread queue operations.
1479 *
1480 * @param[in] the_thread The thread.
1481 * @param[in] operations The corresponding thread queue operations.
1482 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *operations
)
{
  /* Registration is deferred until after deadlock detection, see brief */
  the_thread->Wait.operations = operations;
}
1490
1491/**
1492 * @brief Removes a thread wait lock request.
1493 *
1494 * On SMP configurations, removes a thread wait lock request.
1495 *
1496 * On other configurations, this function does nothing.
1497 *
1498 * @param[in] the_thread The thread.
1499 * @param[in] queue_lock_context The thread queue lock context used for
1500 *   corresponding _Thread_Wait_acquire().
1501 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
  Thread_Control            *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context lock_context;

  /* The pending request chain is protected by the default wait lock */
  _Thread_Wait_acquire_default( the_thread, &lock_context );
  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
  _Thread_Wait_release_default( the_thread, &lock_context );
#else
  (void) the_thread;
  (void) queue_lock_context;
#endif
}
1518
1519/**
1520 * @brief Restores the default thread wait queue and operations.
1521 *
1522 * The caller must be the owner of the current thread wait queue lock.
1523 *
1524 * On SMP configurations, the pending requests are updated to use the stale
1525 * thread queue operations.
1526 *
1527 * @param[in] the_thread The thread.
1528 *
1529 * @see _Thread_Wait_claim().
1530 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context  lock_context;
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );

  if ( node != tail ) {
    /*
     * Mark every pending request as stale (queue == NULL) so that the
     * requesters back out, see _Thread_Wait_acquire_critical().
     */
    do {
      Thread_queue_Context *queue_context;

      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
      queue_context->Lock_context.Wait.queue = NULL;

      node = _Chain_Next( node );
    } while ( node != tail );

    /*
     * Append the tranquilizer gate; it is opened by the last completing
     * request, which lets _Thread_Wait_tranquilize() proceed.
     */
    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &the_thread->Wait.Lock.Tranquilizer
    );
  } else {
    /* No pending requests: let _Thread_Wait_tranquilize() pass directly */
    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
  }
#endif

  the_thread->Wait.queue = NULL;
  the_thread->Wait.operations = &_Thread_queue_Operations_default;

#if defined(RTEMS_SMP)
  _Thread_Wait_release_default_critical( the_thread, &lock_context );
#endif
}
1571
1572/**
1573 * @brief Tranquilizes the thread after a wait on a thread queue.
1574 *
1575 * After the violent blocking procedure this function makes the thread calm and
1576 * peaceful again so that it can carry out its normal work.
1577 *
1578 * On SMP configurations, ensures that all pending thread wait lock requests
1579 * completed before the thread is able to begin a new thread wait procedure.
1580 *
1581 * On other configurations, this function does nothing.
1582 *
1583 * It must be called after a _Thread_Wait_claim() exactly once
1584 *  - after the corresponding thread queue lock was released, and
1585 *  - the default wait state is restored or some other processor is about to do
1586 *    this.
1587 *
1588 * @param[in] the_thread The thread.
1589 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  /* Blocks until the tranquilizer gate was opened, see restore_default() */
  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
#else
  (void) the_thread;
#endif
}
1600
1601/**
1602 * @brief Cancels a thread wait on a thread queue.
1603 *
1604 * @param[in] the_thread The thread.
1605 * @param[in] queue_context The thread queue context used for corresponding
1606 *   _Thread_Wait_acquire().
1607 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Queue *queue;

  queue = the_thread->Wait.queue;

  /*
   * Note: the braces of the if statement below are interleaved with the
   * preprocessor conditionals.  On SMP, the extract and restore are guarded
   * by queue != NULL; on uniprocessor they execute unconditionally.
   */
#if defined(RTEMS_SMP)
  if ( queue != NULL ) {
    _Assert( queue_context->Lock_context.Wait.queue == queue );
#endif

    ( *the_thread->Wait.operations->extract )(
      queue,
      the_thread,
      queue_context
    );
    _Thread_Wait_restore_default( the_thread );

#if defined(RTEMS_SMP)
    /* Restore the queue pointer consumed by restore_default() so that the
       corresponding _Thread_Wait_release() can drop the queue lock. */
    _Assert( queue_context->Lock_context.Wait.queue == NULL );
    queue_context->Lock_context.Wait.queue = queue;
  }
#endif
}
1635
1636/**
1637 * @brief The initial thread wait flags value set by _Thread_Initialize().
1638 */
1639#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1640
1641/**
1642 * @brief Mask to get the thread wait state flags.
1643 */
1644#define THREAD_WAIT_STATE_MASK 0xffU
1645
1646/**
1647 * @brief Indicates that the thread begins with the blocking operation.
1648 *
1649 * A blocking operation consists of an optional watchdog initialization and the
1650 * setting of the appropriate thread blocking state with the corresponding
1651 * scheduler block operation.
1652 */
1653#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1654
1655/**
1656 * @brief Indicates that the thread completed the blocking operation.
1657 */
1658#define THREAD_WAIT_STATE_BLOCKED 0x2U
1659
1660/**
1661 * @brief Indicates that a condition to end the thread wait occurred.
1662 *
1663 * This could be a timeout, a signal, an event or a resource availability.
1664 */
1665#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1666
1667/**
1668 * @brief Mask to get the thread wait class flags.
1669 */
1670#define THREAD_WAIT_CLASS_MASK 0xff00U
1671
1672/**
1673 * @brief Indicates that the thread waits for an event.
1674 */
1675#define THREAD_WAIT_CLASS_EVENT 0x100U
1676
1677/**
1678 * @brief Indicates that the thread waits for a system event.
1679 */
1680#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1681
1682/**
1683 * @brief Indicates that the thread waits for an object.
1684 */
1685#define THREAD_WAIT_CLASS_OBJECT 0x400U
1686
1687/**
1688 * @brief Indicates that the thread waits for a period.
1689 */
1690#define THREAD_WAIT_CLASS_PERIOD 0x800U
1691
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations, this is an atomic store with relaxed memory
 * ordering.
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The new thread wait flags.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
1703
/**
 * @brief Returns the thread wait flags.
 *
 * On SMP configurations, this is an atomic load with relaxed memory
 * ordering.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
1714
/**
 * @brief Returns the thread wait flags with acquire semantics.
 *
 * On SMP configurations, this is an atomic load with acquire memory
 * ordering.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
#else
  return the_thread->Wait.flags;
#endif
}
1725
1726/**
1727 * @brief Tries to change the thread wait flags with release semantics in case
1728 * of success.
1729 *
1730 * Must be called inside a critical section (interrupts disabled).
1731 *
1732 * In case the wait flags are equal to the expected wait flags, then the wait
1733 * flags are set to the desired wait flags.
1734 *
1735 * @param[in] the_thread The thread.
1736 * @param[in] expected_flags The expected wait flags.
1737 * @param[in] desired_flags The desired wait flags.
1738 *
1739 * @retval true The wait flags were equal to the expected wait flags.
1740 * @retval false Otherwise.
1741 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  /* Interrupts must be disabled, see function brief above */
  _Assert( _ISR_Get_level() != 0 );

#if defined(RTEMS_SMP)
  /* Release ordering on success, relaxed on failure */
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_RELEASE,
    ATOMIC_ORDER_RELAXED
  );
#else
  /* With interrupts disabled, a plain compare-and-store is sufficient */
  bool success = ( the_thread->Wait.flags == expected_flags );

  if ( success ) {
    the_thread->Wait.flags = desired_flags;
  }

  return success;
#endif
}
1768
1769/**
1770 * @brief Tries to change the thread wait flags with acquire semantics.
1771 *
1772 * In case the wait flags are equal to the expected wait flags, then the wait
1773 * flags are set to the desired wait flags.
1774 *
1775 * @param[in] the_thread The thread.
1776 * @param[in] expected_flags The expected wait flags.
1777 * @param[in] desired_flags The desired wait flags.
1778 *
1779 * @retval true The wait flags were equal to the expected wait flags.
1780 * @retval false Otherwise.
1781 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
#if defined(RTEMS_SMP)
  /* Acquire ordering on both success and failure */
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_ACQUIRE,
    ATOMIC_ORDER_ACQUIRE
  );
#else
  bool      success;
  ISR_Level level;

  /*
   * Unlike the release variant, this function may be called with interrupts
   * enabled, so establish the critical section here and delegate.
   */
  _ISR_Local_disable( level );

  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    expected_flags,
    desired_flags
  );

  _ISR_Local_enable( level );
  return success;
#endif
}
1812
1813/**
1814 * @brief Returns the object identifier of the object containing the current
1815 * thread wait queue.
1816 *
1817 * This function may be used for debug and system information purposes.  The
1818 * caller must be the owner of the thread lock.
1819 *
1820 * @retval 0 The thread waits on no thread queue currently, the thread wait
1821 *   queue is not contained in an object, or the current thread state provides
1822 *   insufficient information, e.g. the thread is in the middle of a blocking
1823 *   operation.
1824 * @retval other The object identifier of the object containing the thread wait
1825 *   queue.
1826 */
1827Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1828
/**
 * @brief Returns the thread wait return code as a status code.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait return code converted to a Status_Control.
 */
RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
  const Thread_Control *the_thread
)
{
  return (Status_Control) the_thread->Wait.return_code;
}
1835
1836/**
1837 * @brief Cancels a blocking operation so that the thread can continue its
1838 * execution.
1839 *
1840 * In case this function actually cancelled the blocking operation, then the
1841 * thread wait return code is set to the specified status.
1842 *
1843 * A specialization of this function is _Thread_Timeout().
1844 *
1845 * @param[in] the_thread The thread.
1846 * @param[in] status The thread wait status.
1847 */
1848void _Thread_Continue( Thread_Control *the_thread, Status_Control status );
1849
1850/**
1851 * @brief General purpose thread wait timeout.
1852 *
1853 * @param[in] the_watchdog The thread timer watchdog.
1854 */
1855void _Thread_Timeout( Watchdog_Control *the_watchdog );
1856
/**
 * @brief Initializes the thread timer information.
 *
 * The timer lock is initialized, the watchdog header is set to the ticks
 * based watchdog header of the processor, and the watchdog is
 * pre-initialized for the processor.
 *
 * @param[in] timer The thread timer information to initialize.
 * @param[in] cpu The processor used for the watchdog pre-initialization.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
1866
/**
 * @brief Adds a clock tick based timeout to the thread.
 *
 * The thread timer watchdog is inserted into the ticks based watchdog header
 * of the processor with _Thread_Timeout() as the service routine.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] ticks The timeout interval in clock ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_timeout_ticks(
  Thread_Control    *the_thread,
  Per_CPU_Control   *cpu,
  Watchdog_Interval  ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  /* Remember the header for a later _Thread_Timer_remove() */
  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_TICKS ];
  the_thread->Timer.Watchdog.routine = _Thread_Timeout;
  _Watchdog_Per_CPU_insert_ticks( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1884
/**
 * @brief Inserts the thread timer into the realtime watchdog header of the
 * processor.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine to call on expiration.
 * @param[in] expire The absolute expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_realtime(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context  lock_context;
  Watchdog_Header  *header;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  /* Remember the header for a later _Thread_Timer_remove() */
  header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_REALTIME ];
  the_thread->Timer.header = header;
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert( &the_thread->Timer.Watchdog, cpu, header, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1904
/**
 * @brief Removes the thread timer watchdog from its watchdog header.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    /* The watchdog may reside on another processor's header */
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1923
/**
 * @brief Tranquilizes the thread, removes its timer, and unblocks it.
 *
 * @param[in] the_thread The thread to unblock.
 * @param[in] queue The thread queue, used on multiprocessing configurations
 *   to unblock a proxy for a remote thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  /* Wait for all pending thread wait lock requests to complete first */
  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    /* Remote threads are represented by a proxy on this node */
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
1943
1944Status_Control _Thread_Set_name(
1945  Thread_Control *the_thread,
1946  const char     *name
1947);
1948
1949size_t _Thread_Get_name(
1950  const Thread_Control *the_thread,
1951  char                 *buffer,
1952  size_t                buffer_size
1953);
1954
1955#if defined(RTEMS_SMP)
1956#define THREAD_PIN_STEP 2
1957
1958#define THREAD_PIN_PREEMPTION 1
1959
1960void _Thread_Do_unpin(
1961  Thread_Control  *executing,
1962  Per_CPU_Control *cpu_self
1963);
1964#endif
1965
/**
 * @brief Pins the executing thread.
 *
 * On SMP configurations, the pin level of the executing thread is increased
 * by THREAD_PIN_STEP.  On other configurations, this function does nothing.
 *
 * @param[in] executing The executing thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Pin( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  _Assert( executing == _Thread_Executing );

  executing->Scheduler.pin_level += THREAD_PIN_STEP;
#else
  (void) executing;
#endif
}
1976
/**
 * @brief Unpins the executing thread.
 *
 * On SMP configurations, the pin level of the executing thread is decreased
 * by THREAD_PIN_STEP; the slow path _Thread_Do_unpin() handles the final
 * unpin when the pin level equals THREAD_PIN_STEP | THREAD_PIN_PREEMPTION.
 * On other configurations, this function does nothing.
 *
 * @param[in] executing The executing thread.
 * @param[in] cpu_self The processor of the executing thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Unpin(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self
)
{
#if defined(RTEMS_SMP)
  unsigned int pin_level;

  _Assert( executing == _Thread_Executing );

  pin_level = executing->Scheduler.pin_level;
  _Assert( pin_level > 0 );

  if (
    RTEMS_PREDICT_TRUE(
      pin_level != ( THREAD_PIN_STEP | THREAD_PIN_PREEMPTION )
    )
  ) {
    /* Fast path: not the last unpin, just decrement the pin level */
    executing->Scheduler.pin_level = pin_level - THREAD_PIN_STEP;
  } else {
    /* Last unpin with the preemption flag set: take the slow path */
    _Thread_Do_unpin( executing, cpu_self );
  }
#else
  (void) executing;
  (void) cpu_self;
#endif
}
2004
2005/** @}*/
2006
2007#ifdef __cplusplus
2008}
2009#endif
2010
2011#if defined(RTEMS_MULTIPROCESSING)
2012#include <rtems/score/threadmp.h>
2013#endif
2014
2015#endif
2016/* end of include file */
Note: See TracBrowser for help on using the repository browser.