source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 0dd49d0

5
Last change on this file since 0dd49d0 was 0dd49d0, checked in by Sebastian Huber <sebastian.huber@…>, on 10/31/16 at 15:10:32

score: Introduce thread resource count methods

This makes it easier to conditionally enable/disable the thread resource
count usage.

  • Property mode set to 100644
File size: 51.1 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2016 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/threadqimpl.h>
35#include <rtems/score/todimpl.h>
36#include <rtems/score/freechain.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
/**
 * @addtogroup ScoreThread
 */
/**@{**/

/**
 *  Status code used to indicate that a proxy is blocking while it waits for
 *  a resource (see _Thread_Is_proxy_blocking()).
 */
#define THREAD_STATUS_PROXY_BLOCKING 0x1111111

/**
 *  Self for the GNU Ada Run-Time
 */
extern void *rtems_ada_self;

/**
 *  Object information for one thread class together with the free chain of
 *  thread queue heads used by the threads of this class.
 */
typedef struct {
  Objects_Information Objects;

  Freechain_Control Free_thread_queue_heads;
} Thread_Information;

/**
 *  The following defines the information control block used to
 *  manage this class of objects.
 */
extern Thread_Information _Thread_Internal_information;

/**
 *  The following points to the thread whose floating point
 *  context is currently loaded.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
extern Thread_Control *_Thread_Allocated_fp;
#endif

#if defined(RTEMS_SMP)
/* Maps a Scheduler.Help_node chain node back to its enclosing thread */
#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
#endif

/*
 * Visitor invoked by _Thread_Iterate() for each thread with the user argument.
 * NOTE(review): the meaning of the bool return value (presumably true stops
 * the iteration) is not visible here — confirm in _Thread_Iterate().
 */
typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );

/**
 * @brief Iterates over all threads in the system, invoking the visitor with
 * the specified argument for each one.
 */
void _Thread_Iterate(
  Thread_Visitor  visitor,
  void           *arg
);

/**
 * @brief Initializes the thread object information for one thread class.
 */
void _Thread_Initialize_information(
  Thread_Information  *information,
  Objects_APIs         the_api,
  uint16_t             the_class,
  uint32_t             maximum,
  bool                 is_string,
  uint32_t             maximum_name_length
);
100
/**
 *  @brief Initialize thread handler.
 *
 *  This routine performs the initialization necessary for this handler.
 */
void _Thread_Handler_initialization(void);

/**
 *  @brief Create idle thread.
 *
 *  This routine creates the idle thread.
 *
 *  @warning No thread should be created before this one.
 */
void _Thread_Create_idle(void);

/**
 *  @brief Start thread multitasking.
 *
 *  This routine initiates multitasking.  It is invoked only as
 *  part of initialization and its invocation is the last act of
 *  the non-multitasking part of the system initialization.
 */
void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;

/**
 *  @brief Allocate the requested stack space for the thread.
 *
 *  Allocate the requested stack space for the thread.
 *  Set the Start.stack field to the address of the stack.
 *
 *  @param[in] the_thread is the thread where the stack space is requested
 *  @param[in] stack_size is the size of the requested stack space
 *
 *  @retval actual size allocated after any adjustment
 *  @retval zero if the allocation failed
 */
size_t _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  size_t          stack_size
);

/**
 *  @brief Deallocate thread stack.
 *
 *  Deallocate the Thread's stack.
 */
void _Thread_Stack_Free(
  Thread_Control *the_thread
);
151
/**
 *  @brief Initialize thread.
 *
 *  This routine initializes the specified the thread.  It allocates
 *  all memory associated with this thread.  It completes by adding
 *  the thread to the local object table so operations on this
 *  thread id are allowed.
 *
 *  @note If stack_area is NULL, it is allocated from the workspace.
 *
 *  @note If the stack is allocated from the workspace, then it is
 *        guaranteed to be of at least minimum size.
 */
bool _Thread_Initialize(
  Thread_Information                   *information,
  Thread_Control                       *the_thread,
  const struct Scheduler_Control       *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
);

/**
 *  @brief Initializes thread and executes it.
 *
 *  This routine initializes the executable information for a thread
 *  and makes it ready to execute.  After this routine executes, the
 *  thread competes with all other threads for CPU time.
 *
 *  @param the_thread The thread to be started.
 *  @param entry The thread entry information.
 */
bool _Thread_Start(
  Thread_Control                 *the_thread,
  const Thread_Entry_information *entry,
  ISR_lock_Context               *lock_context
);

/**
 * @brief Restarts the executing thread with the new entry; does not return.
 */
void _Thread_Restart_self(
  Thread_Control                 *executing,
  const Thread_Entry_information *entry,
  ISR_lock_Context               *lock_context
) RTEMS_NO_RETURN;

/**
 * @brief Restarts a thread other than the executing thread with the new
 * entry.
 */
bool _Thread_Restart_other(
  Thread_Control                 *the_thread,
  const Thread_Entry_information *entry,
  ISR_lock_Context               *lock_context
);

/* Yields the processor on behalf of the executing thread */
void _Thread_Yield( Thread_Control *executing );

/*
 * Clears and sets the specified thread life state bits of the executing
 * thread.  NOTE(review): presumably returns the previous life state —
 * confirm in the implementation.
 */
Thread_Life_state _Thread_Change_life(
  Thread_Life_state clear,
  Thread_Life_state set,
  Thread_Life_state ignore
);

Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );

/**
 * @brief Kills all zombie threads in the system.
 *
 * Threads change into the zombie state as the last step in the thread
 * termination sequence right before a context switch to the heir thread is
 * initiated.  Since the thread stack is still in use during this phase we have
 * to postpone the thread stack reclamation until this point.  On SMP
 * configurations we may have to busy wait for context switch completion here.
 */
void _Thread_Kill_zombies( void );

/**
 * @brief Exits the executing thread with the specified exit value and life
 * state bits to set.
 */
void _Thread_Exit(
  Thread_Control    *executing,
  Thread_Life_state  set,
  void              *exit_value
);

/**
 * @brief Blocks the executing thread in the specified waiting-for-join state
 * until the thread terminates.
 */
void _Thread_Join(
  Thread_Control       *the_thread,
  States_Control        waiting_for_join,
  Thread_Control       *executing,
  void                 *exit_value
);

/**
 * @brief Cancels the thread with the specified exit value on behalf of the
 * executing thread.
 */
void _Thread_Cancel(
  Thread_Control *the_thread,
  Thread_Control *executing,
  void           *exit_value
);

/**
 * @brief Closes the thread.
 *
 * Closes the thread object and starts the thread termination sequence.  In
 * case the executing thread is not terminated, then this function waits until
 * the terminating thread reached the zombie state.
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
256
257RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
258{
259  return _States_Is_ready( the_thread->current_state );
260}
261
/* Variant of _Thread_Clear_state() which assumes the thread state lock is
 * already held */
States_Control _Thread_Clear_state_locked(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 * @brief Clears the specified thread state.
 *
 * In case the previous state is a non-ready state and the next state is the
 * ready state, then the thread is unblocked by the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] state The state to clear.  It must not be zero.
 *
 * @return The previous state.
 */
States_Control _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
);

/* Variant of _Thread_Set_state() which assumes the thread state lock is
 * already held */
States_Control _Thread_Set_state_locked(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 * @brief Sets the specified thread state.
 *
 * In case the previous state is the ready state, then the thread is blocked by
 * the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] state The state to set.  It must not be zero.
 *
 * @return The previous state.
 */
States_Control _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 *  @brief Initializes environment for a thread.
 *
 *  This routine initializes the context of @a the_thread to its
 *  appropriate starting state.
 *
 *  @param[in] the_thread is the pointer to the thread control block.
 */
void _Thread_Load_environment(
  Thread_Control *the_thread
);

/* Entry adaptor for idle threads */
void _Thread_Entry_adaptor_idle( Thread_Control *executing );

/* Entry adaptor for threads with a numeric entry argument */
void _Thread_Entry_adaptor_numeric( Thread_Control *executing );

/* Entry adaptor for threads with a pointer entry argument */
void _Thread_Entry_adaptor_pointer( Thread_Control *executing );

/**
 *  @brief Wrapper function for all threads.
 *
 *  This routine is the wrapper function for all threads.  It is
 *  the starting point for all threads.  The user provided thread
 *  entry point is invoked by this routine.  Operations
 *  which must be performed immediately before and after the user's
 *  thread executes are found here.
 *
 *  @note On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 */
void _Thread_Handler( void );

/**
 * @brief Executes the global constructors and then restarts itself as the
 * first initialization thread.
 *
 * The first initialization thread is the first RTEMS initialization task or
 * the first POSIX initialization thread in case no RTEMS initialization tasks
 * are present.
 */
void _Thread_Global_construction(
  Thread_Control                 *executing,
  const Thread_Entry_information *entry
) RTEMS_NO_RETURN;

/**
 *  @brief Ended the delay of a thread.
 *
 *  This routine is invoked when a thread must be unblocked at the
 *  end of a time based delay (i.e. wake after or wake when).
 *  It is called by the watchdog handler.
 *
 *  @param[in] id is the thread id
 *  @param[in] ignored is not used
 */
void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
);
366
/* Acquires the thread state lock (the Join_queue lock) inside an ISR-disabled
 * section */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
}

/* Disables interrupts and acquires the thread state lock */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_State_acquire_critical( the_thread, lock_context );
}

/* Disables interrupts, acquires the state lock of the executing thread and
 * returns it.  The executing thread is sampled after interrupt disable so it
 * cannot change under our feet on this processor. */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_State_acquire_critical( executing, lock_context );

  return executing;
}

/* Releases the thread state lock; interrupts stay disabled */
RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
}

/* Releases the thread state lock and re-enables interrupts */
RTEMS_INLINE_ROUTINE void _Thread_State_release(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_State_release_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}

/* Returns true if the executing context owns the thread state lock; only
 * available in debug configurations for use in assertions */
#if defined(RTEMS_DEBUG)
RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
  const Thread_Control *the_thread
)
{
  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
}
#endif
422
/**
 * @brief Performs the priority actions specified by the thread queue context
 * along the thread queue path.
 *
 * The caller must be the owner of the thread wait lock.
 *
 * @param start_of_path The start thread of the thread queue path.
 * @param queue_context The thread queue context specifying the thread queue
 *   path and initial thread priority actions.
 *
 * @see _Thread_queue_Path_acquire_critical().
 */
void _Thread_Priority_perform_actions(
  Thread_Control       *start_of_path,
  Thread_queue_Context *queue_context
);

/**
 * @brief Adds the specified thread priority node to the corresponding thread
 * priority aggregation.
 *
 * The caller must be the owner of the thread wait lock.
 *
 * @param the_thread The thread.
 * @param priority_node The thread priority node to add.
 * @param queue_context The thread queue context to return an updated set of
 *   threads for _Thread_Priority_update().  The thread queue context must be
 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
 *   call of this function.
 *
 * @see _Thread_Wait_acquire().
 */
void _Thread_Priority_add(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
);

/**
 * @brief Removes the specified thread priority node from the corresponding
 * thread priority aggregation.
 *
 * The caller must be the owner of the thread wait lock.
 *
 * @param the_thread The thread.
 * @param priority_node The thread priority node to remove.
 * @param queue_context The thread queue context to return an updated set of
 *   threads for _Thread_Priority_update().  The thread queue context must be
 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
 *   call of this function.
 *
 * @see _Thread_Wait_acquire().
 */
void _Thread_Priority_remove(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
);

/**
 * @brief Propagates a thread priority value change in the specified thread
 * priority node to the corresponding thread priority aggregation.
 *
 * The caller must be the owner of the thread wait lock.
 *
 * @param the_thread The thread.
 * @param priority_node The thread priority node to change.
 * @param prepend_it In case this is true, then the thread is prepended to
 *   its priority group in its home scheduler instance, otherwise it is
 *   appended.
 * @param queue_context The thread queue context to return an updated set of
 *   threads for _Thread_Priority_update().  The thread queue context must be
 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
 *   call of this function.
 *
 * @see _Thread_Wait_acquire().
 */
void _Thread_Priority_changed(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  bool                  prepend_it,
  Thread_queue_Context *queue_context
);
506
/**
 * @brief Changes the thread priority value of the specified thread priority
 * node in the corresponding thread priority aggregation.
 *
 * The caller must be the owner of the thread wait lock.
 *
 * @param the_thread The thread.
 * @param priority_node The thread priority node to change.
 * @param new_priority The new thread priority value of the thread priority
 *   node to change.
 * @param prepend_it In case this is true, then the thread is prepended to
 *   its priority group in its home scheduler instance, otherwise it is
 *   appended.
 * @param queue_context The thread queue context to return an updated set of
 *   threads for _Thread_Priority_update().  The thread queue context must be
 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
 *   call of this function.
 *
 * @see _Thread_Wait_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Priority_Control      new_priority,
  bool                  prepend_it,
  Thread_queue_Context *queue_context
)
{
  /* Store the new value first, then propagate it through the aggregation */
  _Priority_Node_set_priority( priority_node, new_priority );
  _Thread_Priority_changed(
    the_thread,
    priority_node,
    prepend_it,
    queue_context
  );
}
543
/**
 * @brief Replaces the victim priority node with the replacement priority node
 * in the corresponding thread priority aggregation.
 *
 * The caller must be the owner of the thread wait lock.
 *
 * @param the_thread The thread.
 * @param victim_node The victim thread priority node.
 * @param replacement_node The replacement thread priority node.
 *
 * @see _Thread_Wait_acquire().
 */
void _Thread_Priority_replace(
  Thread_Control *the_thread,
  Priority_Node  *victim_node,
  Priority_Node  *replacement_node
);

/**
 * @brief Updates the priority of all threads in the set returned by previous
 * thread priority operations in the thread queue context.
 *
 * @param queue_context The thread queue context returning the set of threads
 *   collected by _Thread_Priority_add(), _Thread_Priority_change(),
 *   _Thread_Priority_changed() and _Thread_Priority_remove().
 */
void _Thread_Priority_update( Thread_queue_Context *queue_context );

/* Updates the priority of the thread and adjusts its scheduler sticky level
 * by the specified amount (SMP only) */
#if defined(RTEMS_SMP)
void _Thread_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
);
#endif
586
/**
 * @brief Returns true if the left thread priority is less than the right
 * thread priority in the intuitive sense of priority and false otherwise.
 *
 * Lower priority values represent more important threads, so a numerically
 * greater value is a lesser priority.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
  Priority_Control left,
  Priority_Control right
)
{
  return left > right;
}
598
599/**
600 * @brief Returns the highest priority of the left and right thread priorities
601 * in the intuitive sense of priority.
602 */
603RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
604  Priority_Control left,
605  Priority_Control right
606)
607{
608  return _Thread_Priority_less_than( left, right ) ? right : left;
609}
610
/* Returns the thread object information for the API of the identifier, or
 * NULL if the API is invalid */
RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
  Objects_Id id
)
{
  uint32_t the_api;

  the_api = _Objects_Get_API( id );

  if ( !_Objects_Is_api_valid( the_api ) ) {
    return NULL;
  }

  /*
   * Threads are always first class :)
   *
   * There is no need to validate the object class of the object identifier,
   * since this will be done by the object get methods.
   *
   * NOTE(review): the hard-coded index 1 assumes the thread class is the
   * first class of every API — confirm against the object class definitions.
   */
  return _Objects_Information_table[ the_api ][ 1 ];
}

/**
 * @brief Gets a thread by its identifier.
 *
 * @see _Objects_Get().
 */
Thread_Control *_Thread_Get(
  Objects_Id         id,
  ISR_lock_Context  *lock_context
);
641
/* Returns the processor assigned to the thread; on uniprocessor
 * configurations this is always the current processor */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

/* Assigns the processor to the thread; a no-op on uniprocessor
 * configurations */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
667
/**
 * This function returns true if the_thread is the currently executing
 * thread on this processor, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Executing );
}

#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif

/**
 * This function returns true if the_thread is the heir
 * thread of this processor, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Heir );
}

/**
 * This routine clears any blocking state for the_thread.  It performs
 * any necessary scheduling operations including the selection of
 * a new heir thread.
 */

RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}
720
/**
 * This function returns true if the floating point context of
 * the_thread is currently loaded in the floating point unit, and
 * false otherwise.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif

/*
 *  If the CPU has hardware floating point, then we must address saving
 *  and restoring it as part of the context switch.
 *
 *  The second conditional compilation section selects the algorithm used
 *  to context switch between floating point tasks.  The deferred algorithm
 *  can be significantly better in a system with few floating point tasks
 *  because it reduces the total number of save and restore FP context
 *  operations.  However, this algorithm can not be used on all CPUs due
 *  to unpredictable use of FP registers by some compilers for integer
 *  operations.
 */

/* Saves the FP context of the executing thread; a no-op with deferred FP
 * switching, where the context is saved lazily by _Thread_Restore_fp() */
RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}

/* Restores the FP context of the executing thread.  With deferred FP
 * switching, the previously loaded FP context is saved first and the unit is
 * handed over to the executing thread. */
RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}
776
/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
788
/**
 * This function returns true if a thread dispatch is necessary, and false
 * otherwise.  It reflects the thread dispatch necessary indicator of the
 * current processor.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
798
/**
 * This function returns true if the_thread is NULL and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
  const Thread_Control *the_thread
)
{
  return ( the_thread == NULL );
}

/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the status code indicates that a proxy is
 * blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}
821
822RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
823{
824  /* Idle threads */
825  uint32_t maximum_internal_threads =
826    rtems_configuration_get_maximum_processors();
827
828  /* MPCI thread */
829#if defined(RTEMS_MULTIPROCESSING)
830  if ( _System_state_Is_multiprocessing ) {
831    ++maximum_internal_threads;
832  }
833#endif
834
835  return maximum_internal_threads;
836}
837
/* Allocates a thread control block from the internal thread object
 * information; the caller must provide the necessary object allocator
 * protection */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}

/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * Must be called with interrupts disabled.  The thread dispatch necessary
 * indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Thread_Dispatch_update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  heir = cpu_self->heir;
  cpu_self->dispatch_necessary = false;
  cpu_self->executing = heir;

  return heir;
}
867
868RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
869  Thread_Control  *the_thread,
870  Per_CPU_Control *cpu
871)
872{
873  Timestamp_Control last;
874  Timestamp_Control ran;
875
876  last = cpu->cpu_usage_timestamp;
877  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
878  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
879  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
880}
881
#if defined( RTEMS_SMP )
/* Installs a new heir on the specified processor and requests a thread
 * dispatch there.  The CPU time consumed by the previous heir is charged to
 * it before the heir is replaced. */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
}
#endif

/* Returns the CPU time used by the thread in the location referenced by
 * cpu_time_used */
void _Thread_Get_CPU_time_used(
  Thread_Control    *the_thread,
  Timestamp_Control *cpu_time_used
);
901
/* Initializes the thread action control to an empty action chain */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}

/* Initializes the thread action so that it is not on any action chain */
RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}

/* Adds the action with the handler to the post-switch actions of the thread
 * and requests a thread dispatch on its processor.  The caller must own the
 * thread state lock. */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;

  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  /* The action is appended only once even if this is called repeatedly */
  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
937
/* Returns true if the life state indicates a pending restart */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

/* Returns true if the life state indicates a pending termination */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

/* Returns true if life changes are currently allowed, i.e. the thread is
 * neither protected nor are changes deferred */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
  Thread_Life_state life_state
)
{
  return ( life_state
    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
}

/* Returns true if a life change (restart or termination) is in progress */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state
    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
}

/* Returns true if the thread is joinable, i.e. not detached.  The caller
 * must own the thread state lock. */
RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
  const Thread_Control *the_thread
)
{
  _Assert( _Thread_State_is_owner( the_thread ) );
  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
}
975
/* Increments the resource count of the thread; no overflow or concurrency
 * protection is visible here, so callers provide the necessary protection */
RTEMS_INLINE_ROUTINE void _Thread_Resource_count_increment(
  Thread_Control *the_thread
)
{
  ++the_thread->resource_count;
}

/* Decrements the resource count of the thread */
RTEMS_INLINE_ROUTINE void _Thread_Resource_count_decrement(
  Thread_Control *the_thread
)
{
  --the_thread->resource_count;
}

/**
 * @brief Returns true if the thread owns resources, and false otherwise.
 *
 * Resources are accounted with the Thread_Control::resource_count resource
 * counter.  This counter is used by mutex objects for example.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
  const Thread_Control *the_thread
)
{
  return the_thread->resource_count != 0;
}
1004
#if defined(RTEMS_SMP)
/**
 * @brief Removes the scheduler help node of the thread from the help chain of
 * the processor, if it is on it.
 *
 * The per-CPU lock protects the help chain.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Chain_Node *help_node;

  help_node = &the_thread->Scheduler.Help_node;

  _Per_CPU_Acquire( cpu );

  if ( !_Chain_Is_node_off_chain( help_node ) ) {
    _Chain_Extract_unprotected( help_node );
    _Chain_Set_off_chain( help_node );
  }

  _Per_CPU_Release( cpu );
}
#endif
1021
/* Returns the home scheduler of the thread; on uniprocessor configurations
 * this is the one and only scheduler */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Thread_Scheduler_get_home(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.home;
#else
  (void) the_thread;
  return &_Scheduler_Table[ 0 ];
#endif
}

/* Returns the scheduler node of the home scheduler of the thread.  On SMP
 * configurations it is the first node of the thread wait nodes, which must
 * not be empty. */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
    _Chain_First( &the_thread->Scheduler.Wait_nodes )
  );
#else
  return the_thread->Scheduler.nodes;
#endif
}

/* Returns the scheduler node of the thread for the scheduler with the
 * specified index.  The nodes are stored as a contiguous array of
 * _Scheduler_Node_size byte elements. */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
  const Thread_Control *the_thread,
  size_t                scheduler_index
)
{
#if defined(RTEMS_SMP)
  return (Scheduler_Node *)
    ( (uintptr_t) the_thread->Scheduler.nodes
      + scheduler_index * _Scheduler_Node_size );
#else
  _Assert( scheduler_index == 0 );
  (void) scheduler_index;
  return the_thread->Scheduler.nodes;
#endif
}
1063
1064#if defined(RTEMS_SMP)
/**
 * @brief Acquires the thread's scheduler lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for the corresponding release.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
}
1072
/**
 * @brief Releases the thread's scheduler lock inside a critical section
 * (interrupts remain disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context of the corresponding acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
}
1080
1081#if defined(RTEMS_SMP)
1082void _Thread_Scheduler_ask_for_help( Thread_Control *the_thread );
1083
1084void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1085#endif
1086
/**
 * @brief Records a scheduler node request (add or remove) for the thread.
 *
 * The request is protected by the thread scheduler lock.  An add request
 * followed by a remove request (or vice versa) before processing cancels
 * out to a nothing request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node of the request.
 * @param[in] request The request (add or remove).
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
  Thread_Control         *the_thread,
  Scheduler_Node         *scheduler_node,
  Scheduler_Node_request  request
)
{
  ISR_lock_Context       lock_context;
  Scheduler_Node_request current_request;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  current_request = scheduler_node->Thread.request;

  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
    /* No pending request: push the node onto the request list */
    _Assert(
      request == SCHEDULER_NODE_REQUEST_ADD
        || request == SCHEDULER_NODE_REQUEST_REMOVE
    );
    _Assert( scheduler_node->Thread.next_request == NULL );
    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
    the_thread->Scheduler.requests = scheduler_node;
  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
    /* Opposite pending request: the two requests cancel out */
    _Assert(
      ( current_request == SCHEDULER_NODE_REQUEST_ADD
        && request == SCHEDULER_NODE_REQUEST_REMOVE )
      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
        && request == SCHEDULER_NODE_REQUEST_ADD )
    );
    request = SCHEDULER_NODE_REQUEST_NOTHING;
  }

  scheduler_node->Thread.request = request;

  _Thread_Scheduler_release_critical( the_thread, &lock_context );
}
1122
/**
 * @brief Appends the scheduler node to the thread's wait node chain and
 * issues a corresponding add request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to add.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Append_unprotected(
    &the_thread->Scheduler.Wait_nodes,
    &scheduler_node->Thread.Wait_node
  );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_ADD
  );
}
1138
/**
 * @brief Extracts the scheduler node from the thread's wait node chain and
 * issues a corresponding remove request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to remove.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_REMOVE
  );
}
1151#endif
1152
1153/**
1154 * @brief Returns the priority of the thread.
1155 *
1156 * Returns the user API and thread wait information relevant thread priority.
1157 * This includes temporary thread priority adjustments due to locking
1158 * protocols, a job release or the POSIX sporadic server for example.
1159 *
1160 * @return The priority of the thread.
1161 */
1162RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1163  const Thread_Control *the_thread
1164)
1165{
1166  Scheduler_Node *scheduler_node;
1167
1168  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1169  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1170}
1171
1172/**
1173 * @brief Acquires the thread wait default lock inside a critical section
1174 * (interrupts disabled).
1175 *
1176 * @param[in] the_thread The thread.
1177 * @param[in] lock_context The lock context used for the corresponding lock
1178 *   release.
1179 *
1180 * @see _Thread_Wait_release_default_critical().
1181 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Interrupts are already disabled by the caller */
  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
}
1189
1190/**
1191 * @brief Acquires the thread wait default lock and returns the executing
1192 * thread.
1193 *
1194 * @param[in] lock_context The lock context used for the corresponding lock
1195 *   release.
1196 *
1197 * @return The executing thread.
1198 *
1199 * @see _Thread_Wait_release_default().
1200 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  /*
   * Disable interrupts first so that the executing thread cannot change
   * before its default wait lock is acquired.
   */
  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_Wait_acquire_default_critical( executing, lock_context );

  return executing;
}
1213
1214/**
1215 * @brief Acquires the thread wait default lock and disables interrupts.
1216 *
1217 * @param[in] the_thread The thread.
1218 * @param[in] lock_context The lock context used for the corresponding lock
1219 *   release.
1220 *
1221 * @see _Thread_Wait_release_default().
1222 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Disable interrupts, then take the default wait lock */
  _ISR_lock_ISR_disable( lock_context );
  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
}
1231
1232/**
1233 * @brief Releases the thread wait default lock inside a critical section
1234 * (interrupts disabled).
1235 *
1236 * The previous interrupt status is not restored.
1237 *
1238 * @param[in] the_thread The thread.
1239 * @param[in] lock_context The lock context used for the corresponding lock
1240 *   acquire.
1241 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* The caller is responsible for restoring the interrupt status */
  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
}
1249
1250/**
1251 * @brief Releases the thread wait default lock and restores the previous
1252 * interrupt status.
1253 *
1254 * @param[in] the_thread The thread.
1255 * @param[in] lock_context The lock context used for the corresponding lock
1256 *   acquire.
1257 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Wait_release_default_critical( the_thread, lock_context );
  /* Restore the interrupt status saved by the corresponding acquire */
  _ISR_lock_ISR_enable( lock_context );
}
1266
1267#if defined(RTEMS_SMP)
1268#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1269  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1270
RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
  Thread_Control            *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
  Chain_Node *first;

  /*
   * Extract our gate from the pending request chain and open the gate of
   * the next pending request, if any.  The caller holds the default wait
   * lock.
   */
  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );

  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
  }
}
1285
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  /* Lock statistics are accounted to the executing thread */
  _Thread_queue_Queue_acquire_critical(
    queue,
    &_Thread_Executing->Potpourri_stats,
    &queue_lock_context->Lock_context
  );
}
1297
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  /* Counterpart of _Thread_Wait_acquire_queue_critical() */
  _Thread_queue_Queue_release_critical(
    queue,
    &queue_lock_context->Lock_context
  );
}
1308#endif
1309
1310/**
1311 * @brief Acquires the thread wait lock inside a critical section (interrupts
1312 * disabled).
1313 *
1314 * @param[in] the_thread The thread.
1315 * @param[in] queue_context The thread queue context for the corresponding
1316 *   _Thread_Wait_release_critical().
1317 *
1318 * @see _Thread_queue_Context_initialize().
1319 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  _Thread_Wait_acquire_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );

  /* Snapshot the current thread queue while the default wait lock is held */
  queue = the_thread->Wait.queue;
  queue_context->Lock_context.Wait.queue = queue;

  if ( queue != NULL ) {
    /*
     * Register our gate as a pending request, then hand over from the
     * default wait lock to the thread queue lock.
     */
    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &queue_context->Lock_context.Wait.Gate
    );
    _Thread_Wait_release_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );

    if ( queue_context->Lock_context.Wait.queue == NULL ) {
      /*
       * The thread queue was detached while we waited for the queue lock:
       * fall back to the default wait lock and remove our request.
       */
      _Thread_Wait_release_queue_critical(
        queue,
        &queue_context->Lock_context
      );
      _Thread_Wait_acquire_default_critical(
        the_thread,
        &queue_context->Lock_context.Lock_context
      );
      _Thread_Wait_remove_request_locked(
        the_thread,
        &queue_context->Lock_context
      );
      _Assert( the_thread->Wait.queue == NULL );
    }
  }
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
1368
1369/**
1370 * @brief Acquires the thread wait default lock and disables interrupts.
1371 *
1372 * @param[in] the_thread The thread.
1373 * @param[in] queue_context The thread queue context for the corresponding
1374 *   _Thread_Wait_release().
1375 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  /* Initialize the context, disable interrupts, then take the wait lock */
  _Thread_queue_Context_initialize( queue_context );
  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
  _Thread_Wait_acquire_critical( the_thread, queue_context );
}
1385
1386/**
1387 * @brief Releases the thread wait lock inside a critical section (interrupts
1388 * disabled).
1389 *
1390 * The previous interrupt status is not restored.
1391 *
1392 * @param[in] the_thread The thread.
1393 * @param[in] queue_context The thread queue context used for corresponding
1394 *   _Thread_Wait_acquire_critical().
1395 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  /* Use the queue recorded by the corresponding acquire */
  queue = queue_context->Lock_context.Wait.queue;

  if ( queue != NULL ) {
    /*
     * Release the thread queue lock, then remove our pending request while
     * holding the default wait lock.
     */
    _Thread_Wait_release_queue_critical(
      queue, &queue_context->Lock_context
    );
    _Thread_Wait_acquire_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_remove_request_locked(
      the_thread,
      &queue_context->Lock_context
    );
  }

  _Thread_Wait_release_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
1429
1430/**
1431 * @brief Releases the thread wait lock and restores the previous interrupt
1432 * status.
1433 *
1434 * @param[in] the_thread The thread.
1435 * @param[in] queue_context The thread queue context used for corresponding
1436 *   _Thread_Wait_acquire().
1437 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  _Thread_Wait_release_critical( the_thread, queue_context );
  /* Restore the interrupt status saved by _Thread_Wait_acquire() */
  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
}
1446
1447/**
1448 * @brief Claims the thread wait queue.
1449 *
1450 * The caller must not be the owner of the default thread wait lock.  The
1451 * caller must be the owner of the corresponding thread queue lock.  The
1452 * registration of the corresponding thread queue operations is deferred and
1453 * done after the deadlock detection.  This is crucial to support timeouts on
1454 * SMP configurations.
1455 *
1456 * @param[in] the_thread The thread.
1457 * @param[in] queue The new thread queue.
1458 *
1459 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
1460 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  ISR_lock_Context lock_context;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  /* A claim is only valid if no thread queue is currently set */
  _Assert( the_thread->Wait.queue == NULL );

#if defined(RTEMS_SMP)
  /* Start with no pending requests and a closed tranquilizer gate */
  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
#endif

  the_thread->Wait.queue = queue;

  _Thread_Wait_release_default_critical( the_thread, &lock_context );
}
1482
1483/**
1484 * @brief Finalizes the thread wait queue claim via registration of the
1485 * corresponding thread queue operations.
1486 *
1487 * @param[in] the_thread The thread.
1488 * @param[in] operations The corresponding thread queue operations.
1489 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *operations
)
{
  /* Registration is deferred until after the deadlock detection */
  the_thread->Wait.operations = operations;
}
1497
1498/**
1499 * @brief Removes a thread wait lock request.
1500 *
1501 * On SMP configurations, removes a thread wait lock request.
1502 *
1503 * On other configurations, this function does nothing.
1504 *
1505 * @param[in] the_thread The thread.
1506 * @param[in] queue_lock_context The thread queue lock context used for
1507 *   corresponding _Thread_Wait_acquire().
1508 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
  Thread_Control            *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context lock_context;

  /* The pending request chain is protected by the default wait lock */
  _Thread_Wait_acquire_default( the_thread, &lock_context );
  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
  _Thread_Wait_release_default( the_thread, &lock_context );
#else
  (void) the_thread;
  (void) queue_lock_context;
#endif
}
1525
1526/**
1527 * @brief Restores the default thread wait queue and operations.
1528 *
1529 * The caller must be the owner of the current thread wait queue lock.
1530 *
1531 * On SMP configurations, the pending requests are updated to use the stale
1532 * thread queue operations.
1533 *
1534 * @param[in] the_thread The thread.
1535 *
1536 * @see _Thread_Wait_claim().
1537 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context  lock_context;
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );

  if ( node != tail ) {
    /*
     * Mark all pending requests as stale (queue set to NULL) and append the
     * tranquilizer gate so that _Thread_Wait_tranquilize() blocks until the
     * last pending request completed.
     */
    do {
      Thread_queue_Context *queue_context;

      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
      queue_context->Lock_context.Wait.queue = NULL;

      node = _Chain_Next( node );
    } while ( node != tail );

    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &the_thread->Wait.Lock.Tranquilizer
    );
  } else {
    /* No pending requests: let _Thread_Wait_tranquilize() pass immediately */
    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
  }
#endif

  the_thread->Wait.queue = NULL;
  the_thread->Wait.operations = &_Thread_queue_Operations_default;

#if defined(RTEMS_SMP)
  _Thread_Wait_release_default_critical( the_thread, &lock_context );
#endif
}
1578
1579/**
1580 * @brief Tranquilizes the thread after a wait on a thread queue.
1581 *
1582 * After the violent blocking procedure this function makes the thread calm and
1583 * peaceful again so that it can carry out its normal work.
1584 *
1585 * On SMP configurations, ensures that all pending thread wait lock requests
1586 * completed before the thread is able to begin a new thread wait procedure.
1587 *
1588 * On other configurations, this function does nothing.
1589 *
1590 * It must be called after a _Thread_Wait_claim() exactly once
1591 *  - after the corresponding thread queue lock was released, and
1592 *  - the default wait state is restored or some other processor is about to do
1593 *    this.
1594 *
1595 * @param[in] the_thread The thread.
1596 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  /* Wait until the tranquilizer gate was opened, see
   * _Thread_Wait_restore_default() */
  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
#else
  (void) the_thread;
#endif
}
1607
1608/**
1609 * @brief Cancels a thread wait on a thread queue.
1610 *
1611 * @param[in] the_thread The thread.
1612 * @param[in] queue_context The thread queue context used for corresponding
1613 *   _Thread_Wait_acquire().
1614 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Queue *queue;

  queue = the_thread->Wait.queue;

  /*
   * On SMP configurations the extract and restore operations are guarded by
   * a queue != NULL check; on uniprocessor configurations they execute
   * unconditionally.  Note that the if-braces are interleaved with the
   * preprocessor conditionals on purpose.
   */
#if defined(RTEMS_SMP)
  if ( queue != NULL ) {
    _Assert( queue_context->Lock_context.Wait.queue == queue );
#endif

    ( *the_thread->Wait.operations->extract )(
      queue,
      the_thread,
      queue_context
    );
    _Thread_Wait_restore_default( the_thread );

#if defined(RTEMS_SMP)
    /* Re-record the queue so that the release can find the queue lock */
    _Assert( queue_context->Lock_context.Wait.queue == NULL );
    queue_context->Lock_context.Wait.queue = queue;
  }
#endif
}
1642
1643/**
1644 * @brief The initial thread wait flags value set by _Thread_Initialize().
1645 */
1646#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1647
1648/**
1649 * @brief Mask to get the thread wait state flags.
1650 */
1651#define THREAD_WAIT_STATE_MASK 0xffU
1652
1653/**
1654 * @brief Indicates that the thread begins with the blocking operation.
1655 *
1656 * A blocking operation consists of an optional watchdog initialization and the
1657 * setting of the appropriate thread blocking state with the corresponding
1658 * scheduler block operation.
1659 */
1660#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1661
1662/**
1663 * @brief Indicates that the thread completed the blocking operation.
1664 */
1665#define THREAD_WAIT_STATE_BLOCKED 0x2U
1666
1667/**
1668 * @brief Indicates that a condition to end the thread wait occurred.
1669 *
1670 * This could be a timeout, a signal, an event or a resource availability.
1671 */
1672#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1673
1674/**
1675 * @brief Mask to get the thread wait class flags.
1676 */
1677#define THREAD_WAIT_CLASS_MASK 0xff00U
1678
1679/**
1680 * @brief Indicates that the thread waits for an event.
1681 */
1682#define THREAD_WAIT_CLASS_EVENT 0x100U
1683
1684/**
1685 * @brief Indicates that the thread waits for a system event.
1686 */
1687#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1688
1689/**
1690 * @brief Indicates that the thread waits for an object.
1691 */
1692#define THREAD_WAIT_CLASS_OBJECT 0x400U
1693
1694/**
1695 * @brief Indicates that the thread waits for a period.
1696 */
1697#define THREAD_WAIT_CLASS_PERIOD 0x800U
1698
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations a relaxed atomic store is used.
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The new thread wait flags.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
1710
/**
 * @brief Returns the thread wait flags.
 *
 * On SMP configurations a relaxed atomic load is used.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
1721
/**
 * @brief Returns the thread wait flags with acquire semantics.
 *
 * On SMP configurations an acquire atomic load is used.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
#else
  return the_thread->Wait.flags;
#endif
}
1732
1733/**
1734 * @brief Tries to change the thread wait flags with release semantics in case
1735 * of success.
1736 *
1737 * Must be called inside a critical section (interrupts disabled).
1738 *
1739 * In case the wait flags are equal to the expected wait flags, then the wait
1740 * flags are set to the desired wait flags.
1741 *
1742 * @param[in] the_thread The thread.
1743 * @param[in] expected_flags The expected wait flags.
1744 * @param[in] desired_flags The desired wait flags.
1745 *
1746 * @retval true The wait flags were equal to the expected wait flags.
1747 * @retval false Otherwise.
1748 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  /* Must be called with interrupts disabled */
  _Assert( _ISR_Get_level() != 0 );

#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_RELEASE,
    ATOMIC_ORDER_RELAXED
  );
#else
  /* Interrupts are disabled, so a plain compare and set is sufficient */
  bool success = ( the_thread->Wait.flags == expected_flags );

  if ( success ) {
    the_thread->Wait.flags = desired_flags;
  }

  return success;
#endif
}
1775
1776/**
1777 * @brief Tries to change the thread wait flags with acquire semantics.
1778 *
1779 * In case the wait flags are equal to the expected wait flags, then the wait
1780 * flags are set to the desired wait flags.
1781 *
1782 * @param[in] the_thread The thread.
1783 * @param[in] expected_flags The expected wait flags.
1784 * @param[in] desired_flags The desired wait flags.
1785 *
1786 * @retval true The wait flags were equal to the expected wait flags.
1787 * @retval false Otherwise.
1788 */
1789RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
1790  Thread_Control    *the_thread,
1791  Thread_Wait_flags  expected_flags,
1792  Thread_Wait_flags  desired_flags
1793)
1794{
1795  bool success;
1796#if defined(RTEMS_SMP)
1797  return _Atomic_Compare_exchange_uint(
1798    &the_thread->Wait.flags,
1799    &expected_flags,
1800    desired_flags,
1801    ATOMIC_ORDER_ACQUIRE,
1802    ATOMIC_ORDER_ACQUIRE
1803  );
1804#else
1805  ISR_Level level;
1806
1807  _ISR_Local_disable( level );
1808
1809  success = _Thread_Wait_flags_try_change_release(
1810    the_thread,
1811    expected_flags,
1812    desired_flags
1813  );
1814
1815  _ISR_Local_enable( level );
1816#endif
1817
1818  return success;
1819}
1820
1821/**
1822 * @brief Returns the object identifier of the object containing the current
1823 * thread wait queue.
1824 *
1825 * This function may be used for debug and system information purposes.  The
1826 * caller must be the owner of the thread lock.
1827 *
1828 * @retval 0 The thread waits on no thread queue currently, the thread wait
1829 *   queue is not contained in an object, or the current thread state provides
1830 *   insufficient information, e.g. the thread is in the middle of a blocking
1831 *   operation.
1832 * @retval other The object identifier of the object containing the thread wait
1833 *   queue.
1834 */
1835Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1836
/**
 * @brief Returns the thread wait return code as a status code.
 *
 * NOTE(review): the cast assumes the wait return code was stored as a
 * Status_Control value — confirm against the thread queue implementation.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
  const Thread_Control *the_thread
)
{
  return (Status_Control) the_thread->Wait.return_code;
}
1843
1844/**
1845 * @brief General purpose thread wait timeout.
1846 *
1847 * @param[in] watchdog The thread timer watchdog.
1848 */
1849void _Thread_Timeout( Watchdog_Control *watchdog );
1850
/**
 * @brief Initializes the thread timer information.
 *
 * The timer lock is initialized, the watchdog header is set to the relative
 * watchdog header of the processor, and the watchdog is pre-initialized for
 * the processor.
 *
 * @param[in] timer The thread timer information.
 * @param[in] cpu The processor used for the watchdog.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
1860
/**
 * @brief Inserts the thread timer into the relative watchdog header of the
 * processor with the specified service routine and ticks interval.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor for the watchdog.
 * @param[in] routine The watchdog service routine.
 * @param[in] ticks The watchdog interval in ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  Watchdog_Interval               ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  /* Record the header so that _Thread_Timer_remove() uses the right one */
  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1879
/**
 * @brief Inserts the thread timer into the absolute watchdog header of the
 * processor with the specified service routine and expiration time.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor for the watchdog.
 * @param[in] routine The watchdog service routine.
 * @param[in] expire The absolute expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  /* Record the header so that _Thread_Timer_remove() uses the right one */
  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1898
/**
 * @brief Removes the thread timer watchdog while holding the timer lock.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    /* On SMP, use the processor the watchdog was inserted on */
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1917
/**
 * @brief Tranquilizes the thread, removes its timer, and unblocks it.
 *
 * On multiprocessing configurations, a proxy unblock via the thread queue is
 * used for non-local thread objects.
 *
 * @param[in] the_thread The thread.
 * @param[in] queue The thread queue; only used for the proxy unblock on
 *   multiprocessing configurations.
 */
RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
1937
1938/** @}*/
1939
1940#ifdef __cplusplus
1941}
1942#endif
1943
1944#if defined(RTEMS_MULTIPROCESSING)
1945#include <rtems/score/threadmp.h>
1946#endif
1947
1948#endif
1949/* end of include file */
Note: See TracBrowser for help on using the repository browser.