source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 5d6b211

5
Last change on this file since 5d6b211 was 5d6b211, checked in by Sebastian Huber <sebastian.huber@…>, on 09/07/16 at 07:04:45

score: Add scheduler node table for each thread

Update #2556.

  • Property mode set to 100644
File size: 47.1 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2016 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/resourceimpl.h>
31#include <rtems/score/schedulernodeimpl.h>
32#include <rtems/score/statesimpl.h>
33#include <rtems/score/status.h>
34#include <rtems/score/sysstate.h>
35#include <rtems/score/threadqimpl.h>
36#include <rtems/score/todimpl.h>
37#include <rtems/score/freechain.h>
38#include <rtems/score/watchdogimpl.h>
39#include <rtems/config.h>
40
41#ifdef __cplusplus
42extern "C" {
43#endif
44
45/**
46 * @addtogroup ScoreThread
47 */
48/**@{**/
49
/**
 *  Status code used to indicate that a thread is blocked via a proxy while
 *  it is waiting for a resource.
 */
54#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
55
56/**
57 *  Self for the GNU Ada Run-Time
58 */
59extern void *rtems_ada_self;
60
/**
 * @brief Management information for a class of threads.
 */
typedef struct {
  /** The object information for this thread class. */
  Objects_Information Objects;

  /** Freechain of thread queue heads available to threads of this class. */
  Freechain_Control Free_thread_queue_heads;
} Thread_Information;
66
67/**
68 *  The following defines the information control block used to
69 *  manage this class of objects.
70 */
71extern Thread_Information _Thread_Internal_information;
72
73/**
74 *  The following points to the thread whose floating point
75 *  context is currently loaded.
76 */
77#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
78extern Thread_Control *_Thread_Allocated_fp;
79#endif
80
81#if defined(RTEMS_SMP)
82#define THREAD_RESOURCE_NODE_TO_THREAD( node ) \
83  RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node )
84#endif
85
86void _Thread_Initialize_information(
87  Thread_Information  *information,
88  Objects_APIs         the_api,
89  uint16_t             the_class,
90  uint32_t             maximum,
91  bool                 is_string,
92  uint32_t             maximum_name_length
93);
94
95/**
96 *  @brief Initialize thread handler.
97 *
98 *  This routine performs the initialization necessary for this handler.
99 */
100void _Thread_Handler_initialization(void);
101
102/**
103 *  @brief Create idle thread.
104 *
105 *  This routine creates the idle thread.
106 *
107 *  @warning No thread should be created before this one.
108 */
109void _Thread_Create_idle(void);
110
111/**
112 *  @brief Start thread multitasking.
113 *
114 *  This routine initiates multitasking.  It is invoked only as
115 *  part of initialization and its invocation is the last act of
116 *  the non-multitasking part of the system initialization.
117 */
118void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
119
120/**
121 *  @brief Allocate the requested stack space for the thread.
122 *
123 *  Allocate the requested stack space for the thread.
124 *  Set the Start.stack field to the address of the stack.
125 *
126 *  @param[in] the_thread is the thread where the stack space is requested
127 *  @param[in] stack_size is the stack space is requested
128 *
129 *  @retval actual size allocated after any adjustment
130 *  @retval zero if the allocation failed
131 */
132size_t _Thread_Stack_Allocate(
133  Thread_Control *the_thread,
134  size_t          stack_size
135);
136
137/**
138 *  @brief Deallocate thread stack.
139 *
140 *  Deallocate the Thread's stack.
141 */
142void _Thread_Stack_Free(
143  Thread_Control *the_thread
144);
145
146/**
147 *  @brief Initialize thread.
148 *
149 *  This routine initializes the specified the thread.  It allocates
150 *  all memory associated with this thread.  It completes by adding
151 *  the thread to the local object table so operations on this
152 *  thread id are allowed.
153 *
154 *  @note If stack_area is NULL, it is allocated from the workspace.
155 *
156 *  @note If the stack is allocated from the workspace, then it is
157 *        guaranteed to be of at least minimum size.
158 */
159bool _Thread_Initialize(
160  Thread_Information                   *information,
161  Thread_Control                       *the_thread,
162  const struct Scheduler_Control       *scheduler,
163  void                                 *stack_area,
164  size_t                                stack_size,
165  bool                                  is_fp,
166  Priority_Control                      priority,
167  bool                                  is_preemptible,
168  Thread_CPU_budget_algorithms          budget_algorithm,
169  Thread_CPU_budget_algorithm_callout   budget_callout,
170  uint32_t                              isr_level,
171  Objects_Name                          name
172);
173
174/**
175 *  @brief Initializes thread and executes it.
176 *
177 *  This routine initializes the executable information for a thread
178 *  and makes it ready to execute.  After this routine executes, the
179 *  thread competes with all other threads for CPU time.
180 *
181 *  @param the_thread The thread to be started.
182 *  @param entry The thread entry information.
183 */
184bool _Thread_Start(
185  Thread_Control                 *the_thread,
186  const Thread_Entry_information *entry,
187  ISR_lock_Context               *lock_context
188);
189
190void _Thread_Restart_self(
191  Thread_Control                 *executing,
192  const Thread_Entry_information *entry,
193  ISR_lock_Context               *lock_context
194) RTEMS_NO_RETURN;
195
196bool _Thread_Restart_other(
197  Thread_Control                 *the_thread,
198  const Thread_Entry_information *entry,
199  ISR_lock_Context               *lock_context
200);
201
202void _Thread_Yield( Thread_Control *executing );
203
204Thread_Life_state _Thread_Change_life(
205  Thread_Life_state clear,
206  Thread_Life_state set,
207  Thread_Life_state ignore
208);
209
210Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
211
212/**
213 * @brief Kills all zombie threads in the system.
214 *
215 * Threads change into the zombie state as the last step in the thread
216 * termination sequence right before a context switch to the heir thread is
217 * initiated.  Since the thread stack is still in use during this phase we have
218 * to postpone the thread stack reclamation until this point.  On SMP
219 * configurations we may have to busy wait for context switch completion here.
220 */
221void _Thread_Kill_zombies( void );
222
223void _Thread_Exit(
224  Thread_Control    *executing,
225  Thread_Life_state  set,
226  void              *exit_value
227);
228
229void _Thread_Join(
230  Thread_Control       *the_thread,
231  States_Control        waiting_for_join,
232  Thread_Control       *executing,
233  Thread_queue_Context *queue_context
234);
235
236void _Thread_Cancel(
237  Thread_Control *the_thread,
238  Thread_Control *executing,
239  void           *exit_value
240);
241
242/**
243 * @brief Closes the thread.
244 *
245 * Closes the thread object and starts the thread termination sequence.  In
246 * case the executing thread is not terminated, then this function waits until
247 * the terminating thread reached the zombie state.
248 */
249void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
250
/**
 * @brief Returns true if the thread is in the ready state, and false
 * otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
{
  return _States_Is_ready( the_thread->current_state );
}
255
256States_Control _Thread_Clear_state_locked(
257  Thread_Control *the_thread,
258  States_Control  state
259);
260
261/**
262 * @brief Clears the specified thread state.
263 *
264 * In case the previous state is a non-ready state and the next state is the
265 * ready state, then the thread is unblocked by the scheduler.
266 *
267 * @param[in] the_thread The thread.
268 * @param[in] state The state to clear.  It must not be zero.
269 *
270 * @return The previous state.
271 */
272States_Control _Thread_Clear_state(
273  Thread_Control *the_thread,
274  States_Control  state
275);
276
277States_Control _Thread_Set_state_locked(
278  Thread_Control *the_thread,
279  States_Control  state
280);
281
282/**
283 * @brief Sets the specified thread state.
284 *
285 * In case the previous state is the ready state, then the thread is blocked by
286 * the scheduler.
287 *
288 * @param[in] the_thread The thread.
289 * @param[in] state The state to set.  It must not be zero.
290 *
291 * @return The previous state.
292 */
293States_Control _Thread_Set_state(
294  Thread_Control *the_thread,
295  States_Control  state
296);
297
298/**
 *  @brief Initializes environment for a thread.
300 *
301 *  This routine initializes the context of @a the_thread to its
302 *  appropriate starting state.
303 *
304 *  @param[in] the_thread is the pointer to the thread control block.
305 */
306void _Thread_Load_environment(
307  Thread_Control *the_thread
308);
309
310void _Thread_Entry_adaptor_idle( Thread_Control *executing );
311
312void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
313
314void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
315
316/**
317 *  @brief Wrapper function for all threads.
318 *
319 *  This routine is the wrapper function for all threads.  It is
320 *  the starting point for all threads.  The user provided thread
321 *  entry point is invoked by this routine.  Operations
322 *  which must be performed immediately before and after the user's
323 *  thread executes are found here.
324 *
325 *  @note On entry, it is assumed all interrupts are blocked and that this
326 *  routine needs to set the initial isr level.  This may or may not
327 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
329 *  setting the initial isr level properly here is safe.
330 */
331void _Thread_Handler( void );
332
333/**
334 * @brief Executes the global constructors and then restarts itself as the
335 * first initialization thread.
336 *
337 * The first initialization thread is the first RTEMS initialization task or
338 * the first POSIX initialization thread in case no RTEMS initialization tasks
339 * are present.
340 */
341void _Thread_Global_construction(
342  Thread_Control                 *executing,
343  const Thread_Entry_information *entry
344) RTEMS_NO_RETURN;
345
346/**
 *  @brief Ends the delay of a thread.
348 *
349 *  This routine is invoked when a thread must be unblocked at the
350 *  end of a time based delay (i.e. wake after or wake when).
351 *  It is called by the watchdog handler.
352 *
353 *  @param[in] id is the thread id
354 *  @param[in] ignored is not used
355 */
356void _Thread_Delay_ended(
357  Objects_Id  id,
358  void       *ignored
359);
360
/**
 * @brief Acquires the thread state lock inside a critical section (interrupts
 * disabled).
 *
 * The thread state is protected by the lock of the thread join queue.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for the corresponding release.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
}
368
369RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
370  Thread_Control   *the_thread,
371  ISR_lock_Context *lock_context
372)
373{
374  _ISR_lock_ISR_disable( lock_context );
375  _Thread_State_acquire_critical( the_thread, lock_context );
376}
377
378RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
379  ISR_lock_Context *lock_context
380)
381{
382  Thread_Control *executing;
383
384  _ISR_lock_ISR_disable( lock_context );
385  executing = _Thread_Executing;
386  _Thread_State_acquire_critical( executing, lock_context );
387
388  return executing;
389}
390
/**
 * @brief Releases the thread state lock inside a critical section (interrupts
 * remain disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context of the corresponding acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
}
398
399RTEMS_INLINE_ROUTINE void _Thread_State_release(
400  Thread_Control   *the_thread,
401  ISR_lock_Context *lock_context
402)
403{
404  _Thread_State_release_critical( the_thread, lock_context );
405  _ISR_lock_ISR_enable( lock_context );
406}
407
#if defined(RTEMS_DEBUG)
/**
 * @brief Returns true if the current context owns the state lock of the
 * thread, and false otherwise.
 *
 * Only available in debug configurations, intended for _Assert() checks.
 */
RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
  const Thread_Control *the_thread
)
{
  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
}
#endif
416
417/**
418 * @brief Performs the priority actions specified by the thread queue context
419 * along the thread queue path.
420 *
421 * The caller must be the owner of the thread wait lock.
422 *
423 * @param start_of_path The start thread of the thread queue path.
424 * @param queue_context The thread queue context specifying the thread queue
425 *   path and initial thread priority actions.
426 *
427 * @see _Thread_queue_Path_acquire_critical().
428 */
429void _Thread_Priority_perform_actions(
430  Thread_Control       *start_of_path,
431  Thread_queue_Context *queue_context
432);
433
434/**
435 * @brief Adds the specified thread priority node to the corresponding thread
436 * priority aggregation.
437 *
438 * The caller must be the owner of the thread wait lock.
439 *
440 * @param the_thread The thread.
441 * @param priority_node The thread priority node to add.
442 * @param queue_context The thread queue context to return an updated set of
443 *   threads for _Thread_Priority_update().  The thread queue context must be
444 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
445 *   call of this function.
446 *
447 * @see _Thread_Wait_acquire().
448 */
449void _Thread_Priority_add(
450  Thread_Control       *the_thread,
451  Priority_Node        *priority_node,
452  Thread_queue_Context *queue_context
453);
454
455/**
456 * @brief Removes the specified thread priority node from the corresponding
457 * thread priority aggregation.
458 *
459 * The caller must be the owner of the thread wait lock.
460 *
461 * @param the_thread The thread.
462 * @param priority_node The thread priority node to remove.
463 * @param queue_context The thread queue context to return an updated set of
464 *   threads for _Thread_Priority_update().  The thread queue context must be
465 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
466 *   call of this function.
467 *
468 * @see _Thread_Wait_acquire().
469 */
470void _Thread_Priority_remove(
471  Thread_Control       *the_thread,
472  Priority_Node        *priority_node,
473  Thread_queue_Context *queue_context
474);
475
476/**
477 * @brief Propagates a thread priority value change in the specified thread
478 * priority node to the corresponding thread priority aggregation.
479 *
480 * The caller must be the owner of the thread wait lock.
481 *
482 * @param the_thread The thread.
483 * @param priority_node The thread priority node to change.
484 * @param prepend_it In case this is true, then the thread is prepended to
485 *   its priority group in its home scheduler instance, otherwise it is
486 *   appended.
487 * @param queue_context The thread queue context to return an updated set of
488 *   threads for _Thread_Priority_update().  The thread queue context must be
489 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
490 *   call of this function.
491 *
492 * @see _Thread_Wait_acquire().
493 */
494void _Thread_Priority_changed(
495  Thread_Control       *the_thread,
496  Priority_Node        *priority_node,
497  bool                  prepend_it,
498  Thread_queue_Context *queue_context
499);
500
501/**
502 * @brief Changes the thread priority value of the specified thread priority
503 * node in the corresponding thread priority aggregation.
504 *
505 * The caller must be the owner of the thread wait lock.
506 *
507 * @param the_thread The thread.
508 * @param priority_node The thread priority node to change.
509 * @param new_priority The new thread priority value of the thread priority
510 *   node to change.
511 * @param prepend_it In case this is true, then the thread is prepended to
512 *   its priority group in its home scheduler instance, otherwise it is
513 *   appended.
514 * @param queue_context The thread queue context to return an updated set of
515 *   threads for _Thread_Priority_update().  The thread queue context must be
516 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
517 *   call of this function.
518 *
519 * @see _Thread_Wait_acquire().
520 */
521RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
522  Thread_Control       *the_thread,
523  Priority_Node        *priority_node,
524  Priority_Control      new_priority,
525  bool                  prepend_it,
526  Thread_queue_Context *queue_context
527)
528{
529  _Priority_Node_set_priority( priority_node, new_priority );
530  _Thread_Priority_changed(
531    the_thread,
532    priority_node,
533    prepend_it,
534    queue_context
535  );
536}
537
538/**
539 * @brief Replaces the victim priority node with the replacement priority node
540 * in the corresponding thread priority aggregation.
541 *
542 * The caller must be the owner of the thread wait lock.
543 *
544 * @param the_thread The thread.
545 * @param victim_node The victim thread priority node.
546 * @param replacement_node The replacement thread priority node.
547 *
548 * @see _Thread_Wait_acquire().
549 */
550void _Thread_Priority_replace(
551  Thread_Control *the_thread,
552  Priority_Node  *victim_node,
553  Priority_Node  *replacement_node
554);
555
/**
 * @brief Updates the priority of all threads in the set returned by the
 * thread queue context.
 *
 * @param queue_context The thread queue context with the set of threads to
 *   update, filled in by previous calls of _Thread_Priority_add(),
 *   _Thread_Priority_change(), _Thread_Priority_changed() or
 *   _Thread_Priority_remove().
 *
 * @see _Thread_Priority_add(), _Thread_Priority_change(),
 *   _Thread_Priority_changed() and _Thread_Priority_remove().
 */
572void _Thread_Priority_update( Thread_queue_Context *queue_context );
573
574/**
575 * @brief Returns true if the left thread priority is less than the right
576 * thread priority in the intuitive sense of priority and false otherwise.
577 */
578RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
579  Priority_Control left,
580  Priority_Control right
581)
582{
583  return left > right;
584}
585
586/**
587 * @brief Returns the highest priority of the left and right thread priorities
588 * in the intuitive sense of priority.
589 */
590RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
591  Priority_Control left,
592  Priority_Control right
593)
594{
595  return _Thread_Priority_less_than( left, right ) ? right : left;
596}
597
598RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
599  Objects_Id id
600)
601{
602  uint32_t the_api;
603
604  the_api = _Objects_Get_API( id );
605
606  if ( !_Objects_Is_api_valid( the_api ) ) {
607    return NULL;
608  }
609
610  /*
611   * Threads are always first class :)
612   *
613   * There is no need to validate the object class of the object identifier,
614   * since this will be done by the object get methods.
615   */
616  return _Objects_Information_table[ the_api ][ 1 ];
617}
618
619/**
620 * @brief Gets a thread by its identifier.
621 *
622 * @see _Objects_Get().
623 */
624Thread_Control *_Thread_Get(
625  Objects_Id         id,
626  ISR_lock_Context  *lock_context
627);
628
/**
 * @brief Returns the processor of the thread.
 *
 * On uni-processor configurations this is always the current processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}
641
/**
 * @brief Sets the processor of the thread.
 *
 * On uni-processor configurations this is a no-operation.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
654
655/**
656 * This function returns true if the_thread is the currently executing
657 * thread, and false otherwise.
658 */
659
660RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
661  const Thread_Control *the_thread
662)
663{
664  return ( the_thread == _Thread_Executing );
665}
666
#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  /* The is-executing indicator lives in the thread context. */
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif
682
683/**
684 * This function returns true if the_thread is the heir
685 * thread, and false otherwise.
686 */
687
688RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
689  const Thread_Control *the_thread
690)
691{
692  return ( the_thread == _Thread_Heir );
693}
694
/**
 * @brief Clears any blocking state of the thread.
 *
 * This performs any necessary scheduling operations including the selection
 * of a new heir thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}
707
/**
 * @brief Returns true if the floating point context of the thread is
 * currently loaded in the floating point unit, and false otherwise.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif
722
723/*
724 *  If the CPU has hardware floating point, then we must address saving
725 *  and restoring it as part of the context switch.
726 *
727 *  The second conditional compilation section selects the algorithm used
728 *  to context switch between floating point tasks.  The deferred algorithm
729 *  can be significantly better in a system with few floating point tasks
730 *  because it reduces the total number of save and restore FP context
731 *  operations.  However, this algorithm can not be used on all CPUs due
732 *  to unpredictable use of FP registers by some compilers for integer
733 *  operations.
734 */
735
/**
 * @brief Saves the floating point context of the executing thread if
 * necessary.
 *
 * With the deferred floating point switch algorithm the save is carried out
 * lazily in _Thread_Restore_fp() instead.
 */
RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}
745
/**
 * @brief Restores the floating point context of the executing thread if
 * necessary.
 *
 * With the deferred algorithm the context of the previous floating point
 * owner is saved here before the unit is handed over to the executing thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  /* Only switch if the executing thread uses FP and does not own the unit. */
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}
763
/**
 * @brief Invoked when the currently loaded floating point context is no
 * longer associated with an active thread.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
775
776/**
777 * This function returns true if dispatching is disabled, and false
778 * otherwise.
779 */
780
781RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
782{
783  return ( _Thread_Dispatch_necessary );
784}
785
786/**
787 * This function returns true if the_thread is NULL and false otherwise.
788 */
789
790RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
791  const Thread_Control *the_thread
792)
793{
794  return ( the_thread == NULL );
795}
796
797/**
798 * @brief Is proxy blocking.
799 *
800 * status which indicates that a proxy is blocking, and false otherwise.
801 */
802RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
803  uint32_t   code
804)
805{
806  return (code == THREAD_STATUS_PROXY_BLOCKING);
807}
808
809RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
810{
811  /* Idle threads */
812  uint32_t maximum_internal_threads =
813    rtems_configuration_get_maximum_processors();
814
815  /* MPCI thread */
816#if defined(RTEMS_MULTIPROCESSING)
817  if ( _System_state_Is_multiprocessing ) {
818    ++maximum_internal_threads;
819  }
820#endif
821
822  return maximum_internal_threads;
823}
824
/**
 * @brief Allocates an internal thread object.
 *
 * Uses the unprotected object allocation, so the caller presumably must
 * provide the necessary protection -- verify against callers.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}
830
831/**
832 * @brief Gets the heir of the processor and makes it executing.
833 *
834 * Must be called with interrupts disabled.  The thread dispatch necessary
835 * indicator is cleared as a side-effect.
836 *
837 * @return The heir thread.
838 *
839 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
840 * _Thread_Dispatch_update_heir().
841 */
842RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
843  Per_CPU_Control *cpu_self
844)
845{
846  Thread_Control *heir;
847
848  heir = cpu_self->heir;
849  cpu_self->dispatch_necessary = false;
850  cpu_self->executing = heir;
851
852  return heir;
853}
854
/**
 * @brief Adds the time elapsed since the CPU usage timestamp of the processor
 * to the CPU time used of the thread.
 *
 * The CPU usage timestamp of the processor is advanced to the current uptime
 * as a side-effect.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  last = cpu->cpu_usage_timestamp;
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}
868
#if defined( RTEMS_SMP )
/**
 * @brief Updates the heir thread of the processor and requests a thread
 * dispatch on it.
 *
 * The CPU time used of the previous heir is updated before it is replaced.
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
}
#endif
883
884void _Thread_Get_CPU_time_used(
885  Thread_Control    *the_thread,
886  Timestamp_Control *cpu_time_used
887);
888
/**
 * @brief Initializes the thread action control with an empty action chain.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}
895
/**
 * @brief Initializes the thread action as off chain, e.g. not pending.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}
902
/**
 * @brief Adds the post switch action to the thread and requests a thread
 * dispatch on the processor of the thread.
 *
 * The caller must own the thread state lock, see _Thread_State_is_owner().
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;

  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  /* The action is appended only if it is not already pending. */
  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
924
925RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
926  Thread_Life_state life_state
927)
928{
929  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
930}
931
932RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
933  Thread_Life_state life_state
934)
935{
936  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
937}
938
939RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
940  Thread_Life_state life_state
941)
942{
943  return ( life_state
944    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
945}
946
947RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
948  Thread_Life_state life_state
949)
950{
951  return ( life_state
952    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
953}
954
/**
 * @brief Returns true if the thread is joinable, e.g. not detached, and false
 * otherwise.
 *
 * The caller must own the thread state lock.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
  const Thread_Control *the_thread
)
{
  _Assert( _Thread_State_is_owner( the_thread ) );
  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
}
962
963/**
964 * @brief Returns true if the thread owns resources, and false otherwise.
965 *
966 * Resources are accounted with the Thread_Control::resource_count resource
967 * counter.  This counter is used by semaphore objects for example.
968 *
969 * In addition to the resource counter there is a resource dependency tree
970 * available on SMP configurations.  In case this tree is non-empty, then the
971 * thread owns resources.
972 *
973 * @param[in] the_thread The thread.
974 */
975RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
976  const Thread_Control *the_thread
977)
978{
979  bool owns_resources = the_thread->resource_count != 0;
980
981#if defined(RTEMS_SMP)
982  owns_resources = owns_resources
983    || _Resource_Node_owns_resources( &the_thread->Resource_node );
984#endif
985
986  return owns_resources;
987}
988
/**
 * @brief Returns the own scheduler node of the thread.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_own_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_node;
#else
  /* There is exactly one scheduler node on uni-processor configurations. */
  return the_thread->Scheduler.nodes;
#endif
}
999
/**
 * @brief Returns the scheduler node of the thread with the specified index.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
  const Thread_Control *the_thread,
  size_t                scheduler_index
)
{
#if defined(RTEMS_SMP)
  /*
   * The scheduler nodes of a thread are stored in a table of entries of size
   * _Scheduler_Node_size each.
   */
  return (Scheduler_Node *)
    ( (uintptr_t) the_thread->Scheduler.nodes
      + scheduler_index * _Scheduler_Node_size );
#else
  _Assert( scheduler_index == 0 );
  (void) scheduler_index;
  return the_thread->Scheduler.nodes;
#endif
}
1015
1016/**
1017 * @brief Returns the priority of the thread.
1018 *
1019 * Returns the user API and thread wait information relevant thread priority.
1020 * This includes temporary thread priority adjustments due to locking
1021 * protocols, a job release or the POSIX sporadic server for example.
1022 *
1023 * @return The priority of the thread.
1024 */
1025RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1026  const Thread_Control *the_thread
1027)
1028{
1029  Scheduler_Node *scheduler_node;
1030
1031  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
1032  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1033}
1034
1035/**
1036 * @brief Acquires the thread wait default lock inside a critical section
1037 * (interrupts disabled).
1038 *
1039 * @param[in] the_thread The thread.
1040 * @param[in] lock_context The lock context used for the corresponding lock
1041 *   release.
1042 *
1043 * @see _Thread_Wait_release_default_critical().
1044 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Interrupts must already be disabled by the caller. */
  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
}
1052
1053/**
1054 * @brief Acquires the thread wait default lock and returns the executing
1055 * thread.
1056 *
1057 * @param[in] lock_context The lock context used for the corresponding lock
1058 *   release.
1059 *
1060 * @return The executing thread.
1061 *
1062 * @see _Thread_Wait_release_default().
1063 */
1064RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
1065  ISR_lock_Context *lock_context
1066)
1067{
1068  Thread_Control *executing;
1069
1070  _ISR_lock_ISR_disable( lock_context );
1071  executing = _Thread_Executing;
1072  _Thread_Wait_acquire_default_critical( executing, lock_context );
1073
1074  return executing;
1075}
1076
1077/**
1078 * @brief Acquires the thread wait default lock and disables interrupts.
1079 *
1080 * @param[in] the_thread The thread.
1081 * @param[in] lock_context The lock context used for the corresponding lock
1082 *   release.
1083 *
1084 * @see _Thread_Wait_release_default().
1085 */
1086RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
1087  Thread_Control   *the_thread,
1088  ISR_lock_Context *lock_context
1089)
1090{
1091  _ISR_lock_ISR_disable( lock_context );
1092  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
1093}
1094
1095/**
1096 * @brief Releases the thread wait default lock inside a critical section
1097 * (interrupts disabled).
1098 *
1099 * The previous interrupt status is not restored.
1100 *
1101 * @param[in] the_thread The thread.
1102 * @param[in] lock_context The lock context used for the corresponding lock
1103 *   acquire.
1104 */
1105RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
1106  Thread_Control   *the_thread,
1107  ISR_lock_Context *lock_context
1108)
1109{
1110  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1111}
1112
1113/**
1114 * @brief Releases the thread wait default lock and restores the previous
1115 * interrupt status.
1116 *
1117 * @param[in] the_thread The thread.
1118 * @param[in] lock_context The lock context used for the corresponding lock
1119 *   acquire.
1120 */
1121RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
1122  Thread_Control   *the_thread,
1123  ISR_lock_Context *lock_context
1124)
1125{
1126  _Thread_Wait_release_default_critical( the_thread, lock_context );
1127  _ISR_lock_ISR_enable( lock_context );
1128}
1129
#if defined(RTEMS_SMP)
/**
 * @brief Maps a pending request gate chain node back to its enclosing thread
 * queue context.
 */
#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1133
1134RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1135  Thread_Control            *the_thread,
1136  Thread_queue_Lock_context *queue_lock_context
1137)
1138{
1139  Chain_Node *first;
1140
1141  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1142  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1143
1144  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1145    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1146  }
1147}
1148
/**
 * @brief Acquires the thread queue lock inside a critical section, charging
 * lock statistics to the executing thread's potpourri statistics.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_acquire_critical(
    queue,
    &_Thread_Executing->Potpourri_stats,
    &queue_lock_context->Lock_context
  );
}
1160
/**
 * @brief Releases the thread queue lock inside a critical section.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_release_critical(
    queue,
    &queue_lock_context->Lock_context
  );
}
1171#endif
1172
1173/**
1174 * @brief Acquires the thread wait lock inside a critical section (interrupts
1175 * disabled).
1176 *
1177 * @param[in] the_thread The thread.
1178 * @param[in] queue_context The thread queue context for the corresponding
1179 *   _Thread_Wait_release_critical().
1180 *
1181 * @see _Thread_queue_Context_initialize().
1182 */
1183RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
1184  Thread_Control       *the_thread,
1185  Thread_queue_Context *queue_context
1186)
1187{
1188#if defined(RTEMS_SMP)
1189  Thread_queue_Queue *queue;
1190
1191  _Thread_Wait_acquire_default_critical(
1192    the_thread,
1193    &queue_context->Lock_context.Lock_context
1194  );
1195
1196  queue = the_thread->Wait.queue;
1197  queue_context->Lock_context.Wait.queue = queue;
1198
1199  if ( queue != NULL ) {
1200    _Thread_queue_Gate_add(
1201      &the_thread->Wait.Lock.Pending_requests,
1202      &queue_context->Lock_context.Wait.Gate
1203    );
1204    _Thread_Wait_release_default_critical(
1205      the_thread,
1206      &queue_context->Lock_context.Lock_context
1207    );
1208    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1209
1210    if ( queue_context->Lock_context.Wait.queue == NULL ) {
1211      _Thread_Wait_release_queue_critical(
1212        queue,
1213        &queue_context->Lock_context
1214      );
1215      _Thread_Wait_acquire_default_critical(
1216        the_thread,
1217        &queue_context->Lock_context.Lock_context
1218      );
1219      _Thread_Wait_remove_request_locked(
1220        the_thread,
1221        &queue_context->Lock_context
1222      );
1223      _Assert( the_thread->Wait.queue == NULL );
1224    }
1225  }
1226#else
1227  (void) the_thread;
1228  (void) queue_context;
1229#endif
1230}
1231
1232/**
1233 * @brief Acquires the thread wait default lock and disables interrupts.
1234 *
1235 * @param[in] the_thread The thread.
1236 * @param[in] queue_context The thread queue context for the corresponding
1237 *   _Thread_Wait_release().
1238 */
1239RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
1240  Thread_Control       *the_thread,
1241  Thread_queue_Context *queue_context
1242)
1243{
1244  _Thread_queue_Context_initialize( queue_context );
1245  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
1246  _Thread_Wait_acquire_critical( the_thread, queue_context );
1247}
1248
1249/**
1250 * @brief Releases the thread wait lock inside a critical section (interrupts
1251 * disabled).
1252 *
1253 * The previous interrupt status is not restored.
1254 *
1255 * @param[in] the_thread The thread.
1256 * @param[in] queue_context The thread queue context used for corresponding
1257 *   _Thread_Wait_acquire_critical().
1258 */
1259RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
1260  Thread_Control       *the_thread,
1261  Thread_queue_Context *queue_context
1262)
1263{
1264#if defined(RTEMS_SMP)
1265  Thread_queue_Queue *queue;
1266
1267  queue = queue_context->Lock_context.Wait.queue;
1268
1269  if ( queue != NULL ) {
1270    _Thread_Wait_release_queue_critical(
1271      queue, &queue_context->Lock_context
1272    );
1273    _Thread_Wait_acquire_default_critical(
1274      the_thread,
1275      &queue_context->Lock_context.Lock_context
1276    );
1277    _Thread_Wait_remove_request_locked(
1278      the_thread,
1279      &queue_context->Lock_context
1280    );
1281  }
1282
1283  _Thread_Wait_release_default_critical(
1284    the_thread,
1285    &queue_context->Lock_context.Lock_context
1286  );
1287#else
1288  (void) the_thread;
1289  (void) queue_context;
1290#endif
1291}
1292
1293/**
1294 * @brief Releases the thread wait lock and restores the previous interrupt
1295 * status.
1296 *
1297 * @param[in] the_thread The thread.
1298 * @param[in] queue_context The thread queue context used for corresponding
1299 *   _Thread_Wait_acquire().
1300 */
1301RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
1302  Thread_Control       *the_thread,
1303  Thread_queue_Context *queue_context
1304)
1305{
1306  _Thread_Wait_release_critical( the_thread, queue_context );
1307  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
1308}
1309
1310/**
1311 * @brief Claims the thread wait queue and operations.
1312 *
1313 * The caller must not be the owner of the default thread wait lock.  The
1314 * caller must be the owner of the corresponding thread queue lock.
1315 *
1316 * @param[in] the_thread The thread.
1317 * @param[in] queue The new thread queue.
1318 * @param[in] operations The new thread operations.
1319 *
1320 * @see _Thread_Wait_restore_default().
1321 */
1322RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
1323  Thread_Control                *the_thread,
1324  Thread_queue_Queue            *queue,
1325  const Thread_queue_Operations *operations
1326)
1327{
1328  ISR_lock_Context lock_context;
1329
1330  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1331
1332  _Assert( the_thread->Wait.queue == NULL );
1333
1334#if defined(RTEMS_SMP)
1335  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
1336  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
1337  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
1338#endif
1339
1340  the_thread->Wait.queue = queue;
1341  the_thread->Wait.operations = operations;
1342
1343  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1344}
1345
1346/**
1347 * @brief Removes a thread wait lock request.
1348 *
1349 * On SMP configurations, removes a thread wait lock request.
1350 *
1351 * On other configurations, this function does nothing.
1352 *
1353 * @param[in] the_thread The thread.
1354 * @param[in] queue_lock_context The thread queue lock context used for
1355 *   corresponding _Thread_Wait_acquire().
1356 */
1357RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
1358  Thread_Control            *the_thread,
1359  Thread_queue_Lock_context *queue_lock_context
1360)
1361{
1362#if defined(RTEMS_SMP)
1363  ISR_lock_Context lock_context;
1364
1365  _Thread_Wait_acquire_default( the_thread, &lock_context );
1366  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
1367  _Thread_Wait_release_default( the_thread, &lock_context );
1368#else
1369  (void) the_thread;
1370  (void) queue_lock_context;
1371#endif
1372}
1373
1374/**
1375 * @brief Restores the default thread wait queue and operations.
1376 *
1377 * The caller must be the owner of the current thread wait queue lock.
1378 *
1379 * On SMP configurations, the pending requests are updated to use the stale
1380 * thread queue operations.
1381 *
1382 * @param[in] the_thread The thread.
1383 *
1384 * @see _Thread_Wait_claim().
1385 */
1386RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
1387  Thread_Control *the_thread
1388)
1389{
1390#if defined(RTEMS_SMP)
1391  ISR_lock_Context  lock_context;
1392  Chain_Node       *node;
1393  const Chain_Node *tail;
1394
1395  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1396
1397  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1398  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
1399
1400  if ( node != tail ) {
1401    do {
1402      Thread_queue_Context *queue_context;
1403
1404      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
1405      queue_context->Lock_context.Wait.queue = NULL;
1406
1407      node = _Chain_Next( node );
1408    } while ( node != tail );
1409
1410    _Thread_queue_Gate_add(
1411      &the_thread->Wait.Lock.Pending_requests,
1412      &the_thread->Wait.Lock.Tranquilizer
1413    );
1414  } else {
1415    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
1416  }
1417#endif
1418
1419  the_thread->Wait.queue = NULL;
1420  the_thread->Wait.operations = &_Thread_queue_Operations_default;
1421
1422#if defined(RTEMS_SMP)
1423  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1424#endif
1425}
1426
1427/**
1428 * @brief Tranquilizes the thread after a wait on a thread queue.
1429 *
1430 * After the violent blocking procedure this function makes the thread calm and
1431 * peaceful again so that it can carry out its normal work.
1432 *
1433 * On SMP configurations, ensures that all pending thread wait lock requests
1434 * completed before the thread is able to begin a new thread wait procedure.
1435 *
1436 * On other configurations, this function does nothing.
1437 *
1438 * It must be called after a _Thread_Wait_claim() exactly once
1439 *  - after the corresponding thread queue lock was released, and
1440 *  - the default wait state is restored or some other processor is about to do
1441 *    this.
1442 *
1443 * @param[in] the_thread The thread.
1444 */
1445RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
1446  Thread_Control *the_thread
1447)
1448{
1449#if defined(RTEMS_SMP)
1450  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
1451#else
1452  (void) the_thread;
1453#endif
1454}
1455
1456/**
1457 * @brief Cancels a thread wait on a thread queue.
1458 *
1459 * @param[in] the_thread The thread.
1460 * @param[in] queue_context The thread queue context used for corresponding
1461 *   _Thread_Wait_acquire().
1462 */
1463RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
1464  Thread_Control       *the_thread,
1465  Thread_queue_Context *queue_context
1466)
1467{
1468  Thread_queue_Queue *queue;
1469
1470  queue = the_thread->Wait.queue;
1471
1472#if defined(RTEMS_SMP)
1473  if ( queue != NULL ) {
1474    _Assert( queue_context->Lock_context.Wait.queue == queue );
1475#endif
1476
1477    ( *the_thread->Wait.operations->extract )(
1478      queue,
1479      the_thread,
1480      queue_context
1481    );
1482    _Thread_Wait_restore_default( the_thread );
1483
1484#if defined(RTEMS_SMP)
1485    _Assert( queue_context->Lock_context.Wait.queue == NULL );
1486    queue_context->Lock_context.Wait.queue = queue;
1487  }
1488#endif
1489}
1490
1491/**
1492 * @brief The initial thread wait flags value set by _Thread_Initialize().
1493 */
1494#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1495
1496/**
1497 * @brief Mask to get the thread wait state flags.
1498 */
1499#define THREAD_WAIT_STATE_MASK 0xffU
1500
1501/**
1502 * @brief Indicates that the thread begins with the blocking operation.
1503 *
1504 * A blocking operation consists of an optional watchdog initialization and the
1505 * setting of the appropriate thread blocking state with the corresponding
1506 * scheduler block operation.
1507 */
1508#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1509
1510/**
1511 * @brief Indicates that the thread completed the blocking operation.
1512 */
1513#define THREAD_WAIT_STATE_BLOCKED 0x2U
1514
1515/**
1516 * @brief Indicates that a condition to end the thread wait occurred.
1517 *
1518 * This could be a timeout, a signal, an event or a resource availability.
1519 */
1520#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1521
1522/**
1523 * @brief Mask to get the thread wait class flags.
1524 */
1525#define THREAD_WAIT_CLASS_MASK 0xff00U
1526
1527/**
1528 * @brief Indicates that the thread waits for an event.
1529 */
1530#define THREAD_WAIT_CLASS_EVENT 0x100U
1531
1532/**
1533 * @brief Indicates that the thread waits for a system event.
1534 */
1535#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1536
1537/**
1538 * @brief Indicates that the thread waits for an object.
1539 */
1540#define THREAD_WAIT_CLASS_OBJECT 0x400U
1541
1542/**
1543 * @brief Indicates that the thread waits for a period.
1544 */
1545#define THREAD_WAIT_CLASS_PERIOD 0x800U
1546
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations, the store uses relaxed atomic ordering; otherwise it
 * is a plain assignment.
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The new thread wait flags.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
1558
/**
 * @brief Returns the current thread wait flags.
 *
 * On SMP configurations, the load uses relaxed atomic ordering; otherwise it
 * is a plain read.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
1569
1570/**
1571 * @brief Tries to change the thread wait flags with release semantics in case
1572 * of success.
1573 *
1574 * Must be called inside a critical section (interrupts disabled).
1575 *
1576 * In case the wait flags are equal to the expected wait flags, then the wait
1577 * flags are set to the desired wait flags.
1578 *
1579 * @param[in] the_thread The thread.
1580 * @param[in] expected_flags The expected wait flags.
1581 * @param[in] desired_flags The desired wait flags.
1582 *
1583 * @retval true The wait flags were equal to the expected wait flags.
1584 * @retval false Otherwise.
1585 */
1586RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
1587  Thread_Control    *the_thread,
1588  Thread_Wait_flags  expected_flags,
1589  Thread_Wait_flags  desired_flags
1590)
1591{
1592  _Assert( _ISR_Get_level() != 0 );
1593
1594#if defined(RTEMS_SMP)
1595  return _Atomic_Compare_exchange_uint(
1596    &the_thread->Wait.flags,
1597    &expected_flags,
1598    desired_flags,
1599    ATOMIC_ORDER_RELEASE,
1600    ATOMIC_ORDER_RELAXED
1601  );
1602#else
1603  bool success = ( the_thread->Wait.flags == expected_flags );
1604
1605  if ( success ) {
1606    the_thread->Wait.flags = desired_flags;
1607  }
1608
1609  return success;
1610#endif
1611}
1612
1613/**
1614 * @brief Tries to change the thread wait flags with acquire semantics.
1615 *
1616 * In case the wait flags are equal to the expected wait flags, then the wait
1617 * flags are set to the desired wait flags.
1618 *
1619 * @param[in] the_thread The thread.
1620 * @param[in] expected_flags The expected wait flags.
1621 * @param[in] desired_flags The desired wait flags.
1622 *
1623 * @retval true The wait flags were equal to the expected wait flags.
1624 * @retval false Otherwise.
1625 */
1626RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
1627  Thread_Control    *the_thread,
1628  Thread_Wait_flags  expected_flags,
1629  Thread_Wait_flags  desired_flags
1630)
1631{
1632  bool success;
1633#if defined(RTEMS_SMP)
1634  return _Atomic_Compare_exchange_uint(
1635    &the_thread->Wait.flags,
1636    &expected_flags,
1637    desired_flags,
1638    ATOMIC_ORDER_ACQUIRE,
1639    ATOMIC_ORDER_ACQUIRE
1640  );
1641#else
1642  ISR_Level level;
1643
1644  _ISR_Local_disable( level );
1645
1646  success = _Thread_Wait_flags_try_change_release(
1647    the_thread,
1648    expected_flags,
1649    desired_flags
1650  );
1651
1652  _ISR_Local_enable( level );
1653#endif
1654
1655  return success;
1656}
1657
1658/**
1659 * @brief Returns the object identifier of the object containing the current
1660 * thread wait queue.
1661 *
1662 * This function may be used for debug and system information purposes.  The
1663 * caller must be the owner of the thread lock.
1664 *
1665 * @retval 0 The thread waits on no thread queue currently, the thread wait
1666 *   queue is not contained in an object, or the current thread state provides
1667 *   insufficient information, e.g. the thread is in the middle of a blocking
1668 *   operation.
1669 * @retval other The object identifier of the object containing the thread wait
1670 *   queue.
1671 */
1672Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1673
/**
 * @brief Returns the thread wait return code as a status code.
 *
 * @param[in] the_thread The thread.
 *
 * @return The status stored in the thread wait return code.
 */
RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
  const Thread_Control *the_thread
)
{
  return (Status_Control) the_thread->Wait.return_code;
}
1680
1681/**
1682 * @brief General purpose thread wait timeout.
1683 *
1684 * @param[in] watchdog The thread timer watchdog.
1685 */
1686void _Thread_Timeout( Watchdog_Control *watchdog );
1687
/**
 * @brief Initializes the thread timer information.
 *
 * Sets up the timer lock, selects the relative watchdog header of the given
 * processor as the initial header, and pre-initializes the watchdog for that
 * processor.
 *
 * @param[in] timer The thread timer information.
 * @param[in] cpu The processor providing the watchdog header.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
1697
/**
 * @brief Inserts the thread timer into the relative (ticks based) watchdog
 * header of the specified processor.
 *
 * The timer header, routine, and watchdog insertion are updated atomically
 * with respect to the thread timer lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor whose relative watchdog header is used.
 * @param[in] routine The watchdog service routine invoked on expiry.
 * @param[in] ticks The interval in clock ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  Watchdog_Interval               ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1716
/**
 * @brief Inserts the thread timer into the absolute (expiration time based)
 * watchdog header of the specified processor.
 *
 * The timer header, routine, and watchdog insertion are updated atomically
 * with respect to the thread timer lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor whose absolute watchdog header is used.
 * @param[in] routine The watchdog service routine invoked on expiry.
 * @param[in] expire The absolute expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1735
/**
 * @brief Removes the thread timer from its watchdog header.
 *
 * On SMP configurations, the processor recorded in the watchdog is used; on
 * uni-processor configurations, the current processor is used.  The removal
 * is protected by the thread timer lock.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1754
/**
 * @brief Tranquilizes the thread, removes its timer, and unblocks it.
 *
 * On multiprocessing configurations, a thread with a remote object identifier
 * is unblocked via the thread queue proxy; otherwise the thread is unblocked
 * directly.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue The thread queue used to unblock a proxy (multiprocessing
 *   configurations only; otherwise unused).
 */
RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
1774
1775/** @}*/
1776
1777#ifdef __cplusplus
1778}
1779#endif
1780
1781#if defined(RTEMS_MULTIPROCESSING)
1782#include <rtems/score/threadmp.h>
1783#endif
1784
1785#endif
1786/* end of include file */
Note: See TracBrowser for help on using the repository browser.