source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 97f7dac

5
Last change on this file since 97f7dac was 97f7dac, checked in by Sebastian Huber <sebastian.huber@…>, on Oct 21, 2016 at 7:23:48 AM

score: Delete _Scheduler_Ask_for_help_if_necessary

Delete Thread_Control::Resource_node.

Update #2556.

  • Property mode set to 100644
File size: 50.7 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2016 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/threadqimpl.h>
35#include <rtems/score/todimpl.h>
36#include <rtems/score/freechain.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44/**
45 * @addtogroup ScoreThread
46 */
47/**@{**/
48
/**
 *  The following status value indicates that a proxy is blocking while
 *  waiting for a resource.
 */
53#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
54
55/**
56 *  Self for the GNU Ada Run-Time
57 */
58extern void *rtems_ada_self;
59
/**
 *  Management information for one class of threads: the object information
 *  plus a freechain of thread queue heads used by threads of this class.
 */
typedef struct {
  /** Object management information for this thread class. */
  Objects_Information Objects;

  /** Pool of inactive thread queue heads for threads of this class. */
  Freechain_Control Free_thread_queue_heads;
} Thread_Information;
65
66/**
67 *  The following defines the information control block used to
68 *  manage this class of objects.
69 */
70extern Thread_Information _Thread_Internal_information;
71
72/**
73 *  The following points to the thread whose floating point
74 *  context is currently loaded.
75 */
76#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
77extern Thread_Control *_Thread_Allocated_fp;
78#endif
79
80#if defined(RTEMS_SMP)
81#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
82  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
83#endif
84
85typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
86
87void _Thread_Iterate(
88  Thread_Visitor  visitor,
89  void           *arg
90);
91
92void _Thread_Initialize_information(
93  Thread_Information  *information,
94  Objects_APIs         the_api,
95  uint16_t             the_class,
96  uint32_t             maximum,
97  bool                 is_string,
98  uint32_t             maximum_name_length
99);
100
101/**
102 *  @brief Initialize thread handler.
103 *
104 *  This routine performs the initialization necessary for this handler.
105 */
106void _Thread_Handler_initialization(void);
107
108/**
109 *  @brief Create idle thread.
110 *
111 *  This routine creates the idle thread.
112 *
113 *  @warning No thread should be created before this one.
114 */
115void _Thread_Create_idle(void);
116
117/**
118 *  @brief Start thread multitasking.
119 *
120 *  This routine initiates multitasking.  It is invoked only as
121 *  part of initialization and its invocation is the last act of
122 *  the non-multitasking part of the system initialization.
123 */
124void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
125
126/**
127 *  @brief Allocate the requested stack space for the thread.
128 *
129 *  Allocate the requested stack space for the thread.
130 *  Set the Start.stack field to the address of the stack.
131 *
132 *  @param[in] the_thread is the thread where the stack space is requested
133 *  @param[in] stack_size is the stack space is requested
134 *
135 *  @retval actual size allocated after any adjustment
136 *  @retval zero if the allocation failed
137 */
138size_t _Thread_Stack_Allocate(
139  Thread_Control *the_thread,
140  size_t          stack_size
141);
142
143/**
144 *  @brief Deallocate thread stack.
145 *
146 *  Deallocate the Thread's stack.
147 */
148void _Thread_Stack_Free(
149  Thread_Control *the_thread
150);
151
152/**
153 *  @brief Initialize thread.
154 *
155 *  This routine initializes the specified the thread.  It allocates
156 *  all memory associated with this thread.  It completes by adding
157 *  the thread to the local object table so operations on this
158 *  thread id are allowed.
159 *
160 *  @note If stack_area is NULL, it is allocated from the workspace.
161 *
162 *  @note If the stack is allocated from the workspace, then it is
163 *        guaranteed to be of at least minimum size.
164 */
165bool _Thread_Initialize(
166  Thread_Information                   *information,
167  Thread_Control                       *the_thread,
168  const struct Scheduler_Control       *scheduler,
169  void                                 *stack_area,
170  size_t                                stack_size,
171  bool                                  is_fp,
172  Priority_Control                      priority,
173  bool                                  is_preemptible,
174  Thread_CPU_budget_algorithms          budget_algorithm,
175  Thread_CPU_budget_algorithm_callout   budget_callout,
176  uint32_t                              isr_level,
177  Objects_Name                          name
178);
179
180/**
181 *  @brief Initializes thread and executes it.
182 *
183 *  This routine initializes the executable information for a thread
184 *  and makes it ready to execute.  After this routine executes, the
185 *  thread competes with all other threads for CPU time.
186 *
187 *  @param the_thread The thread to be started.
188 *  @param entry The thread entry information.
189 */
190bool _Thread_Start(
191  Thread_Control                 *the_thread,
192  const Thread_Entry_information *entry,
193  ISR_lock_Context               *lock_context
194);
195
196void _Thread_Restart_self(
197  Thread_Control                 *executing,
198  const Thread_Entry_information *entry,
199  ISR_lock_Context               *lock_context
200) RTEMS_NO_RETURN;
201
202bool _Thread_Restart_other(
203  Thread_Control                 *the_thread,
204  const Thread_Entry_information *entry,
205  ISR_lock_Context               *lock_context
206);
207
208void _Thread_Yield( Thread_Control *executing );
209
210Thread_Life_state _Thread_Change_life(
211  Thread_Life_state clear,
212  Thread_Life_state set,
213  Thread_Life_state ignore
214);
215
216Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
217
218/**
219 * @brief Kills all zombie threads in the system.
220 *
221 * Threads change into the zombie state as the last step in the thread
222 * termination sequence right before a context switch to the heir thread is
223 * initiated.  Since the thread stack is still in use during this phase we have
224 * to postpone the thread stack reclamation until this point.  On SMP
225 * configurations we may have to busy wait for context switch completion here.
226 */
227void _Thread_Kill_zombies( void );
228
229void _Thread_Exit(
230  Thread_Control    *executing,
231  Thread_Life_state  set,
232  void              *exit_value
233);
234
235void _Thread_Join(
236  Thread_Control       *the_thread,
237  States_Control        waiting_for_join,
238  Thread_Control       *executing,
239  Thread_queue_Context *queue_context
240);
241
242void _Thread_Cancel(
243  Thread_Control *the_thread,
244  Thread_Control *executing,
245  void           *exit_value
246);
247
248/**
249 * @brief Closes the thread.
250 *
251 * Closes the thread object and starts the thread termination sequence.  In
252 * case the executing thread is not terminated, then this function waits until
253 * the terminating thread reached the zombie state.
254 */
255void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
256
257RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
258{
259  return _States_Is_ready( the_thread->current_state );
260}
261
262States_Control _Thread_Clear_state_locked(
263  Thread_Control *the_thread,
264  States_Control  state
265);
266
267/**
268 * @brief Clears the specified thread state.
269 *
270 * In case the previous state is a non-ready state and the next state is the
271 * ready state, then the thread is unblocked by the scheduler.
272 *
273 * @param[in] the_thread The thread.
274 * @param[in] state The state to clear.  It must not be zero.
275 *
276 * @return The previous state.
277 */
278States_Control _Thread_Clear_state(
279  Thread_Control *the_thread,
280  States_Control  state
281);
282
283States_Control _Thread_Set_state_locked(
284  Thread_Control *the_thread,
285  States_Control  state
286);
287
288/**
289 * @brief Sets the specified thread state.
290 *
291 * In case the previous state is the ready state, then the thread is blocked by
292 * the scheduler.
293 *
294 * @param[in] the_thread The thread.
295 * @param[in] state The state to set.  It must not be zero.
296 *
297 * @return The previous state.
298 */
299States_Control _Thread_Set_state(
300  Thread_Control *the_thread,
301  States_Control  state
302);
303
304/**
305 *  @brief Initializes enviroment for a thread.
306 *
307 *  This routine initializes the context of @a the_thread to its
308 *  appropriate starting state.
309 *
310 *  @param[in] the_thread is the pointer to the thread control block.
311 */
312void _Thread_Load_environment(
313  Thread_Control *the_thread
314);
315
316void _Thread_Entry_adaptor_idle( Thread_Control *executing );
317
318void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
319
320void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
321
322/**
323 *  @brief Wrapper function for all threads.
324 *
325 *  This routine is the wrapper function for all threads.  It is
326 *  the starting point for all threads.  The user provided thread
327 *  entry point is invoked by this routine.  Operations
328 *  which must be performed immediately before and after the user's
329 *  thread executes are found here.
330 *
331 *  @note On entry, it is assumed all interrupts are blocked and that this
332 *  routine needs to set the initial isr level.  This may or may not
333 *  actually be needed by the context switch routine and as a result
334 *  interrupts may already be at there proper level.  Either way,
335 *  setting the initial isr level properly here is safe.
336 */
337void _Thread_Handler( void );
338
339/**
340 * @brief Executes the global constructors and then restarts itself as the
341 * first initialization thread.
342 *
343 * The first initialization thread is the first RTEMS initialization task or
344 * the first POSIX initialization thread in case no RTEMS initialization tasks
345 * are present.
346 */
347void _Thread_Global_construction(
348  Thread_Control                 *executing,
349  const Thread_Entry_information *entry
350) RTEMS_NO_RETURN;
351
352/**
353 *  @brief Ended the delay of a thread.
354 *
355 *  This routine is invoked when a thread must be unblocked at the
356 *  end of a time based delay (i.e. wake after or wake when).
357 *  It is called by the watchdog handler.
358 *
359 *  @param[in] id is the thread id
360 *  @param[in] ignored is not used
361 */
362void _Thread_Delay_ended(
363  Objects_Id  id,
364  void       *ignored
365);
366
/**
 * @brief Acquires the thread state lock inside a critical section (interrupts
 * already disabled); the lock used is the thread's Join_queue lock.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
}

/**
 * @brief Disables interrupts and acquires the thread state lock.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_State_acquire_critical( the_thread, lock_context );
}

/**
 * @brief Disables interrupts, acquires the state lock of the executing thread
 * and returns the executing thread.
 *
 * The executing thread is sampled after interrupts are disabled.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_State_acquire_critical( executing, lock_context );

  return executing;
}

/**
 * @brief Releases the thread state lock inside a critical section.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
}

/**
 * @brief Releases the thread state lock and enables interrupts.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_State_release_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
413
#if defined(RTEMS_DEBUG)
/**
 * @brief Returns true if the current context owns the thread state lock
 * (the thread's Join_queue lock); intended for use in assertions.
 */
RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
  const Thread_Control *the_thread
)
{
  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
}
#endif
422
423/**
424 * @brief Performs the priority actions specified by the thread queue context
425 * along the thread queue path.
426 *
427 * The caller must be the owner of the thread wait lock.
428 *
429 * @param start_of_path The start thread of the thread queue path.
430 * @param queue_context The thread queue context specifying the thread queue
431 *   path and initial thread priority actions.
432 *
433 * @see _Thread_queue_Path_acquire_critical().
434 */
435void _Thread_Priority_perform_actions(
436  Thread_Control       *start_of_path,
437  Thread_queue_Context *queue_context
438);
439
440/**
441 * @brief Adds the specified thread priority node to the corresponding thread
442 * priority aggregation.
443 *
444 * The caller must be the owner of the thread wait lock.
445 *
446 * @param the_thread The thread.
447 * @param priority_node The thread priority node to add.
448 * @param queue_context The thread queue context to return an updated set of
449 *   threads for _Thread_Priority_update().  The thread queue context must be
450 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
451 *   call of this function.
452 *
453 * @see _Thread_Wait_acquire().
454 */
455void _Thread_Priority_add(
456  Thread_Control       *the_thread,
457  Priority_Node        *priority_node,
458  Thread_queue_Context *queue_context
459);
460
461/**
462 * @brief Removes the specified thread priority node from the corresponding
463 * thread priority aggregation.
464 *
465 * The caller must be the owner of the thread wait lock.
466 *
467 * @param the_thread The thread.
468 * @param priority_node The thread priority node to remove.
469 * @param queue_context The thread queue context to return an updated set of
470 *   threads for _Thread_Priority_update().  The thread queue context must be
471 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
472 *   call of this function.
473 *
474 * @see _Thread_Wait_acquire().
475 */
476void _Thread_Priority_remove(
477  Thread_Control       *the_thread,
478  Priority_Node        *priority_node,
479  Thread_queue_Context *queue_context
480);
481
482/**
483 * @brief Propagates a thread priority value change in the specified thread
484 * priority node to the corresponding thread priority aggregation.
485 *
486 * The caller must be the owner of the thread wait lock.
487 *
488 * @param the_thread The thread.
489 * @param priority_node The thread priority node to change.
490 * @param prepend_it In case this is true, then the thread is prepended to
491 *   its priority group in its home scheduler instance, otherwise it is
492 *   appended.
493 * @param queue_context The thread queue context to return an updated set of
494 *   threads for _Thread_Priority_update().  The thread queue context must be
495 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
496 *   call of this function.
497 *
498 * @see _Thread_Wait_acquire().
499 */
500void _Thread_Priority_changed(
501  Thread_Control       *the_thread,
502  Priority_Node        *priority_node,
503  bool                  prepend_it,
504  Thread_queue_Context *queue_context
505);
506
507/**
508 * @brief Changes the thread priority value of the specified thread priority
509 * node in the corresponding thread priority aggregation.
510 *
511 * The caller must be the owner of the thread wait lock.
512 *
513 * @param the_thread The thread.
514 * @param priority_node The thread priority node to change.
515 * @param new_priority The new thread priority value of the thread priority
516 *   node to change.
517 * @param prepend_it In case this is true, then the thread is prepended to
518 *   its priority group in its home scheduler instance, otherwise it is
519 *   appended.
520 * @param queue_context The thread queue context to return an updated set of
521 *   threads for _Thread_Priority_update().  The thread queue context must be
522 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
523 *   call of this function.
524 *
525 * @see _Thread_Wait_acquire().
526 */
RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Priority_Control      new_priority,
  bool                  prepend_it,
  Thread_queue_Context *queue_context
)
{
  /* Store the new priority value first, then let the change propagate
     through the thread priority aggregation. */
  _Priority_Node_set_priority( priority_node, new_priority );
  _Thread_Priority_changed(
    the_thread,
    priority_node,
    prepend_it,
    queue_context
  );
}
543
544/**
545 * @brief Replaces the victim priority node with the replacement priority node
546 * in the corresponding thread priority aggregation.
547 *
548 * The caller must be the owner of the thread wait lock.
549 *
550 * @param the_thread The thread.
551 * @param victim_node The victim thread priority node.
552 * @param replacement_node The replacement thread priority node.
553 *
554 * @see _Thread_Wait_acquire().
555 */
556void _Thread_Priority_replace(
557  Thread_Control *the_thread,
558  Priority_Node  *victim_node,
559  Priority_Node  *replacement_node
560);
561
562/**
563 * @brief Adds a priority node to the corresponding thread priority
564 * aggregation.
565 *
566 * The caller must be the owner of the thread wait lock.
567 *
568 * @param the_thread The thread.
569 * @param priority_node The thread priority node to add.
570 * @param queue_context The thread queue context to return an updated set of
571 *   threads for _Thread_Priority_update().  The thread queue context must be
572 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
573 *   call of this function.
574 *
575 * @see _Thread_Priority_add(), _Thread_Priority_change(),
576 *   _Thread_Priority_changed() and _Thread_Priority_remove().
577 */
578void _Thread_Priority_update( Thread_queue_Context *queue_context );
579
580/**
581 * @brief Returns true if the left thread priority is less than the right
582 * thread priority in the intuitive sense of priority and false otherwise.
583 */
584RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
585  Priority_Control left,
586  Priority_Control right
587)
588{
589  return left > right;
590}
591
592/**
593 * @brief Returns the highest priority of the left and right thread priorities
594 * in the intuitive sense of priority.
595 */
596RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
597  Priority_Control left,
598  Priority_Control right
599)
600{
601  return _Thread_Priority_less_than( left, right ) ? right : left;
602}
603
/**
 * @brief Returns the objects information of the thread API designated by the
 * object identifier, or NULL if the API is invalid.
 */
RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
  Objects_Id id
)
{
  uint32_t the_api;

  the_api = _Objects_Get_API( id );

  if ( !_Objects_Is_api_valid( the_api ) ) {
    return NULL;
  }

  /*
   * Threads are always first class :)
   *
   * There is no need to validate the object class of the object identifier,
   * since this will be done by the object get methods.
   */
  return _Objects_Information_table[ the_api ][ 1 ];
}
624
625/**
626 * @brief Gets a thread by its identifier.
627 *
628 * @see _Objects_Get().
629 */
630Thread_Control *_Thread_Get(
631  Objects_Id         id,
632  ISR_lock_Context  *lock_context
633);
634
/**
 * @brief Returns the processor assigned to the thread on SMP configurations,
 * otherwise the current processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

/**
 * @brief Assigns the processor to the thread; a no-op on uniprocessor
 * configurations.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
660
/**
 * This function returns true if the_thread is the currently executing
 * thread on this processor, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Executing );
}
672
#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 *
 * The check uses the is-executing indicator of the thread's register
 * context.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif
688
/**
 * This function returns true if the_thread is the heir
 * thread of the current processor, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Heir );
}
700
/**
 * This routine clears any blocking state for the_thread.  It performs
 * any necessary scheduling operations including the selection of
 * a new heir thread.
 */

RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  /* Clearing all blocking states unblocks the thread via the scheduler if
     no other non-ready state remains. */
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}
713
/**
 * This function returns true if the floating point context of
 * the_thread is currently loaded in the floating point unit, and
 * false otherwise.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  /* _Thread_Allocated_fp tracks the current owner of the FP unit */
  return ( the_thread == _Thread_Allocated_fp );
}
#endif
728
/*
 *  If the CPU has hardware floating point, then we must address saving
 *  and restoring it as part of the context switch.
 *
 *  The second conditional compilation section selects the algorithm used
 *  to context switch between floating point tasks.  The deferred algorithm
 *  can be significantly better in a system with few floating point tasks
 *  because it reduces the total number of save and restore FP context
 *  operations.  However, this algorithm can not be used on all CPUs due
 *  to unpredictable use of FP registers by some compilers for integer
 *  operations.
 */

/**
 * @brief Saves the floating point context of the executing thread, unless
 * deferred floating point switching is in effect.
 */
RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}

/**
 * @brief Restores the floating point context of the executing thread.
 *
 * With deferred floating point switching, the context of the thread which
 * currently owns the floating point unit is saved first, then the executing
 * thread becomes the new owner.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}
769
/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
781
/**
 * This function returns true if a thread dispatch is necessary on the
 * current processor, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
791
/**
 * @brief This function returns true if the_thread is NULL and false
 * otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
  const Thread_Control *the_thread
)
{
  return ( the_thread == NULL );
}
802
/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the status code indicates that a proxy is
 * blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}
814
/**
 * @brief Returns the maximum count of internal threads: one idle thread per
 * configured processor, plus one MPCI thread if the system is
 * multiprocessing.
 */
RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
{
  /* Idle threads */
  uint32_t maximum_internal_threads =
    rtems_configuration_get_maximum_processors();

  /* MPCI thread */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    ++maximum_internal_threads;
  }
#endif

  return maximum_internal_threads;
}
830
/**
 * @brief Allocates an internal thread object; the allocation is unprotected,
 * so the caller must provide the appropriate protection.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}
836
/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * Must be called with interrupts disabled.  The thread dispatch necessary
 * indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Thread_Dispatch_update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  heir = cpu_self->heir;
  /* The heir becomes the executing thread, so no dispatch is pending */
  cpu_self->dispatch_necessary = false;
  cpu_self->executing = heir;

  return heir;
}
860
/**
 * @brief Charges the processor time elapsed since the last usage update of
 * this processor to the thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  /* Advance the per-processor usage timestamp and add the difference to
     the CPU time used by the thread. */
  last = cpu->cpu_usage_timestamp;
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}
874
#if defined( RTEMS_SMP )
/**
 * @brief Installs the new heir on the heir's processor and requests a thread
 * dispatch on it.
 *
 * The processor time used by the previous heir is updated before the heir is
 * replaced.
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
}
#endif
889
890void _Thread_Get_CPU_time_used(
891  Thread_Control    *the_thread,
892  Timestamp_Control *cpu_time_used
893);
894
/**
 * @brief Initializes the thread action control to an empty chain of actions.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}

/**
 * @brief Initializes the thread action so that it is not on any action
 * chain.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}
908
/**
 * @brief Adds a post-switch action to the thread and requests a thread
 * dispatch on the thread's processor.
 *
 * The caller must own the thread state lock (asserted).  An action which is
 * already on the chain keeps its position.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;

  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
930
/**
 * @brief Returns true if THREAD_LIFE_RESTARTING is set in the life state.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

/**
 * @brief Returns true if THREAD_LIFE_TERMINATING is set in the life state.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

/**
 * @brief Returns true if life changes are allowed, i.e. the life state is
 * neither protected nor change deferred.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
  Thread_Life_state life_state
)
{
  return ( life_state
    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
}

/**
 * @brief Returns true if the life state indicates a restart or termination
 * in progress.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state
    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
}
960
/**
 * @brief Returns true if the thread is joinable, i.e. not detached; the
 * caller must own the thread state lock (asserted).
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
  const Thread_Control *the_thread
)
{
  _Assert( _Thread_State_is_owner( the_thread ) );
  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
}
968
/**
 * @brief Returns true if the thread owns resources, and false otherwise.
 *
 * Resources are accounted with the Thread_Control::resource_count resource
 * counter.  This counter is used by mutex objects for example.
 *
 * @param[in] the_thread The thread.
 *
 * @return True if the resource count is not zero, false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
  const Thread_Control *the_thread
)
{
  return the_thread->resource_count != 0;
}
983
#if defined(RTEMS_SMP)
/**
 * @brief Cancels a pending scheduler help request of the thread by removing
 * its help node from the processor's chain, in case it is on one.
 *
 * The per-processor lock protects the help node.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  _Per_CPU_Acquire( cpu );

  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
  }

  _Per_CPU_Release( cpu );
}
#endif
1000
/**
 * @brief Returns the thread's own scheduler node.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_own_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_node;
#else
  return the_thread->Scheduler.nodes;
#endif
}

/**
 * @brief Returns the scheduler node of the thread's home scheduler instance.
 *
 * On SMP configurations this is the first node of the wait node chain, which
 * must not be empty (asserted).
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
    _Chain_First( &the_thread->Scheduler.Wait_nodes )
  );
#else
  return the_thread->Scheduler.nodes;
#endif
}
1025
/**
 * @brief Returns the thread's scheduler node for the scheduler instance with
 * the specified index.
 *
 * On SMP configurations the nodes are stored in one contiguous area of
 * _Scheduler_Node_size elements.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
  const Thread_Control *the_thread,
  size_t                scheduler_index
)
{
#if defined(RTEMS_SMP)
  return (Scheduler_Node *)
    ( (uintptr_t) the_thread->Scheduler.nodes
      + scheduler_index * _Scheduler_Node_size );
#else
  _Assert( scheduler_index == 0 );
  (void) scheduler_index;
  return the_thread->Scheduler.nodes;
#endif
}
1041
1042#if defined(RTEMS_SMP)
/**
 * @brief Acquires the thread's scheduler lock inside a critical section.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
}

/**
 * @brief Releases the thread's scheduler lock inside a critical section.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
}
1058
1059#if defined(RTEMS_SMP)
1060void _Thread_Scheduler_ask_for_help( Thread_Control *the_thread );
1061
1062void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1063#endif
1064
/**
 * @brief Records a scheduler node request (add or remove) for later
 * processing by _Thread_Scheduler_process_requests().
 *
 * If the node has no pending request, it is pushed onto the thread's request
 * list.  If the node already has an opposite pending request (add vs.
 * remove), the two requests cancel out and the node's request is set to
 * SCHEDULER_NODE_REQUEST_NOTHING; the node stays on the request list.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node affected by the request.
 * @param[in] request The request; must be SCHEDULER_NODE_REQUEST_ADD or
 *   SCHEDULER_NODE_REQUEST_REMOVE.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
  Thread_Control         *the_thread,
  Scheduler_Node         *scheduler_node,
  Scheduler_Node_request  request
)
{
  ISR_lock_Context       lock_context;
  Scheduler_Node_request current_request;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  current_request = scheduler_node->Thread.request;

  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
    /* First request for this node: link it into the request list. */
    _Assert(
      request == SCHEDULER_NODE_REQUEST_ADD
        || request == SCHEDULER_NODE_REQUEST_REMOVE
    );
    _Assert( scheduler_node->Thread.next_request == NULL );
    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
    the_thread->Scheduler.requests = scheduler_node;
  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
    /* An opposite request is pending: the two requests neutralize. */
    _Assert(
      ( current_request == SCHEDULER_NODE_REQUEST_ADD
        && request == SCHEDULER_NODE_REQUEST_REMOVE )
      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
        && request == SCHEDULER_NODE_REQUEST_ADD )
    );
    request = SCHEDULER_NODE_REQUEST_NOTHING;
  }

  scheduler_node->Thread.request = request;

  _Thread_Scheduler_release_critical( the_thread, &lock_context );
}
1100
/**
 * @brief Appends the scheduler node to the thread wait nodes and records a
 * corresponding add request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to add.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Append_unprotected(
    &the_thread->Scheduler.Wait_nodes,
    &scheduler_node->Thread.Wait_node
  );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_ADD
  );
}
1116
/**
 * @brief Extracts the scheduler node from the thread wait nodes and records a
 * corresponding remove request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to remove.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_REMOVE
  );
}
1129#endif
1130
1131/**
1132 * @brief Returns the priority of the thread.
1133 *
1134 * Returns the user API and thread wait information relevant thread priority.
1135 * This includes temporary thread priority adjustments due to locking
1136 * protocols, a job release or the POSIX sporadic server for example.
1137 *
1138 * @return The priority of the thread.
1139 */
1140RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1141  const Thread_Control *the_thread
1142)
1143{
1144  Scheduler_Node *scheduler_node;
1145
1146  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
1147  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1148}
1149
1150/**
1151 * @brief Acquires the thread wait default lock inside a critical section
1152 * (interrupts disabled).
1153 *
1154 * @param[in] the_thread The thread.
1155 * @param[in] lock_context The lock context used for the corresponding lock
1156 *   release.
1157 *
1158 * @see _Thread_Wait_release_default_critical().
1159 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Interrupts are already disabled; only acquire the default wait lock. */
  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
}
1167
1168/**
1169 * @brief Acquires the thread wait default lock and returns the executing
1170 * thread.
1171 *
1172 * @param[in] lock_context The lock context used for the corresponding lock
1173 *   release.
1174 *
1175 * @return The executing thread.
1176 *
1177 * @see _Thread_Wait_release_default().
1178 */
1179RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
1180  ISR_lock_Context *lock_context
1181)
1182{
1183  Thread_Control *executing;
1184
1185  _ISR_lock_ISR_disable( lock_context );
1186  executing = _Thread_Executing;
1187  _Thread_Wait_acquire_default_critical( executing, lock_context );
1188
1189  return executing;
1190}
1191
1192/**
1193 * @brief Acquires the thread wait default lock and disables interrupts.
1194 *
1195 * @param[in] the_thread The thread.
1196 * @param[in] lock_context The lock context used for the corresponding lock
1197 *   release.
1198 *
1199 * @see _Thread_Wait_release_default().
1200 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Disable interrupts, then acquire the default wait lock. */
  _ISR_lock_ISR_disable( lock_context );
  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
}
1209
1210/**
1211 * @brief Releases the thread wait default lock inside a critical section
1212 * (interrupts disabled).
1213 *
1214 * The previous interrupt status is not restored.
1215 *
1216 * @param[in] the_thread The thread.
1217 * @param[in] lock_context The lock context used for the corresponding lock
1218 *   acquire.
1219 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Release the default wait lock; interrupt status is left untouched. */
  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
}
1227
1228/**
1229 * @brief Releases the thread wait default lock and restores the previous
1230 * interrupt status.
1231 *
1232 * @param[in] the_thread The thread.
1233 * @param[in] lock_context The lock context used for the corresponding lock
1234 *   acquire.
1235 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Release the default wait lock, then restore the interrupt status. */
  _Thread_Wait_release_default_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
1244
#if defined(RTEMS_SMP)
/*
 * Maps a pending request chain node (embedded at
 * Lock_context.Wait.Gate.Node) back to its enclosing Thread_queue_Context.
 */
#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1248
1249RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1250  Thread_Control            *the_thread,
1251  Thread_queue_Lock_context *queue_lock_context
1252)
1253{
1254  Chain_Node *first;
1255
1256  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1257  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1258
1259  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1260    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1261  }
1262}
1263
/**
 * @brief Acquires the thread queue lock, accounting the acquire in the
 * executing thread's Potpourri_stats.
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The lock context for the acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_acquire_critical(
    queue,
    &_Thread_Executing->Potpourri_stats,
    &queue_lock_context->Lock_context
  );
}
1275
/**
 * @brief Releases the thread queue lock acquired by
 * _Thread_Wait_acquire_queue_critical().
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The lock context used for the acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_release_critical(
    queue,
    &queue_lock_context->Lock_context
  );
}
1286#endif
1287
1288/**
1289 * @brief Acquires the thread wait lock inside a critical section (interrupts
1290 * disabled).
1291 *
1292 * @param[in] the_thread The thread.
1293 * @param[in] queue_context The thread queue context for the corresponding
1294 *   _Thread_Wait_release_critical().
1295 *
1296 * @see _Thread_queue_Context_initialize().
1297 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  /* Snapshot the current wait queue under the default wait lock. */
  _Thread_Wait_acquire_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );

  queue = the_thread->Wait.queue;
  queue_context->Lock_context.Wait.queue = queue;

  if ( queue != NULL ) {
    /*
     * Register our gate as a pending request before dropping the default
     * lock, then take the queue lock itself.
     */
    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &queue_context->Lock_context.Wait.Gate
    );
    _Thread_Wait_release_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );

    if ( queue_context->Lock_context.Wait.queue == NULL ) {
      /*
       * The queue was detached from the thread while we acquired it (our
       * request was invalidated, see _Thread_Wait_restore_default()).  Drop
       * the now stale queue lock and fall back to the default wait lock.
       */
      _Thread_Wait_release_queue_critical(
        queue,
        &queue_context->Lock_context
      );
      _Thread_Wait_acquire_default_critical(
        the_thread,
        &queue_context->Lock_context.Lock_context
      );
      _Thread_Wait_remove_request_locked(
        the_thread,
        &queue_context->Lock_context
      );
      _Assert( the_thread->Wait.queue == NULL );
    }
  }
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
1346
1347/**
1348 * @brief Acquires the thread wait default lock and disables interrupts.
1349 *
1350 * @param[in] the_thread The thread.
1351 * @param[in] queue_context The thread queue context for the corresponding
1352 *   _Thread_Wait_release().
1353 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  /* Initialize the context, disable interrupts, then acquire the lock. */
  _Thread_queue_Context_initialize( queue_context );
  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
  _Thread_Wait_acquire_critical( the_thread, queue_context );
}
1363
1364/**
1365 * @brief Releases the thread wait lock inside a critical section (interrupts
1366 * disabled).
1367 *
1368 * The previous interrupt status is not restored.
1369 *
1370 * @param[in] the_thread The thread.
1371 * @param[in] queue_context The thread queue context used for corresponding
1372 *   _Thread_Wait_acquire_critical().
1373 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Thread_queue_Queue *queue;

  queue = queue_context->Lock_context.Wait.queue;

  if ( queue != NULL ) {
    /*
     * A queue lock was acquired: release it, then remove our pending
     * request under the default wait lock.
     */
    _Thread_Wait_release_queue_critical(
      queue, &queue_context->Lock_context
    );
    _Thread_Wait_acquire_default_critical(
      the_thread,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_remove_request_locked(
      the_thread,
      &queue_context->Lock_context
    );
  }

  _Thread_Wait_release_default_critical(
    the_thread,
    &queue_context->Lock_context.Lock_context
  );
#else
  (void) the_thread;
  (void) queue_context;
#endif
}
1407
1408/**
1409 * @brief Releases the thread wait lock and restores the previous interrupt
1410 * status.
1411 *
1412 * @param[in] the_thread The thread.
1413 * @param[in] queue_context The thread queue context used for corresponding
1414 *   _Thread_Wait_acquire().
1415 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  /* Release the wait lock, then restore the interrupt status. */
  _Thread_Wait_release_critical( the_thread, queue_context );
  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
}
1424
1425/**
1426 * @brief Claims the thread wait queue.
1427 *
1428 * The caller must not be the owner of the default thread wait lock.  The
1429 * caller must be the owner of the corresponding thread queue lock.  The
1430 * registration of the corresponding thread queue operations is deferred and
1431 * done after the deadlock detection.  This is crucial to support timeouts on
1432 * SMP configurations.
1433 *
1434 * @param[in] the_thread The thread.
1435 * @param[in] queue The new thread queue.
1436 *
1437 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
1438 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  ISR_lock_Context lock_context;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  /* A claim is only valid while no other queue is set. */
  _Assert( the_thread->Wait.queue == NULL );

#if defined(RTEMS_SMP)
  /* Start with no pending lock requests and a closed tranquilizer gate. */
  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
#endif

  the_thread->Wait.queue = queue;

  _Thread_Wait_release_default_critical( the_thread, &lock_context );
}
1460
1461/**
1462 * @brief Finalizes the thread wait queue claim via registration of the
1463 * corresponding thread queue operations.
1464 *
1465 * @param[in] the_thread The thread.
1466 * @param[in] operations The corresponding thread queue operations.
1467 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *operations
)
{
  /* Registration is deferred until after deadlock detection, see above. */
  the_thread->Wait.operations = operations;
}
1475
1476/**
1477 * @brief Removes a thread wait lock request.
1478 *
1479 * On SMP configurations, removes a thread wait lock request.
1480 *
1481 * On other configurations, this function does nothing.
1482 *
1483 * @param[in] the_thread The thread.
1484 * @param[in] queue_lock_context The thread queue lock context used for
1485 *   corresponding _Thread_Wait_acquire().
1486 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
  Thread_Control            *the_thread,
  Thread_queue_Lock_context *queue_lock_context
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context lock_context;

  /* The locked variant requires ownership of the default wait lock. */
  _Thread_Wait_acquire_default( the_thread, &lock_context );
  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
  _Thread_Wait_release_default( the_thread, &lock_context );
#else
  (void) the_thread;
  (void) queue_lock_context;
#endif
}
1503
1504/**
1505 * @brief Restores the default thread wait queue and operations.
1506 *
1507 * The caller must be the owner of the current thread wait queue lock.
1508 *
1509 * On SMP configurations, the pending requests are updated to use the stale
1510 * thread queue operations.
1511 *
1512 * @param[in] the_thread The thread.
1513 *
1514 * @see _Thread_Wait_claim().
1515 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Context  lock_context;
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );

  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );

  if ( node != tail ) {
    /*
     * Invalidate every pending request so that its owner notices the queue
     * is gone (see _Thread_Wait_acquire_critical()), then enqueue the
     * tranquilizer gate so the last request opens it.
     */
    do {
      Thread_queue_Context *queue_context;

      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
      queue_context->Lock_context.Wait.queue = NULL;

      node = _Chain_Next( node );
    } while ( node != tail );

    _Thread_queue_Gate_add(
      &the_thread->Wait.Lock.Pending_requests,
      &the_thread->Wait.Lock.Tranquilizer
    );
  } else {
    /* No pending requests: the thread may proceed immediately. */
    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
  }
#endif

  the_thread->Wait.queue = NULL;
  the_thread->Wait.operations = &_Thread_queue_Operations_default;

#if defined(RTEMS_SMP)
  _Thread_Wait_release_default_critical( the_thread, &lock_context );
#endif
}
1556
1557/**
1558 * @brief Tranquilizes the thread after a wait on a thread queue.
1559 *
1560 * After the violent blocking procedure this function makes the thread calm and
1561 * peaceful again so that it can carry out its normal work.
1562 *
1563 * On SMP configurations, ensures that all pending thread wait lock requests
1564 * completed before the thread is able to begin a new thread wait procedure.
1565 *
1566 * On other configurations, this function does nothing.
1567 *
1568 * It must be called after a _Thread_Wait_claim() exactly once
1569 *  - after the corresponding thread queue lock was released, and
1570 *  - the default wait state is restored or some other processor is about to do
1571 *    this.
1572 *
1573 * @param[in] the_thread The thread.
1574 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  /* Wait until the last pending request opens the tranquilizer gate. */
  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
#else
  (void) the_thread;
#endif
}
1585
1586/**
1587 * @brief Cancels a thread wait on a thread queue.
1588 *
1589 * @param[in] the_thread The thread.
1590 * @param[in] queue_context The thread queue context used for corresponding
1591 *   _Thread_Wait_acquire().
1592 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Queue *queue;

  queue = the_thread->Wait.queue;

  /*
   * NOTE: The conditional deliberately straddles the block below.  On SMP
   * configurations the extract and restore are only done when a queue is
   * set; on uniprocessor configurations they are done unconditionally.
   */
#if defined(RTEMS_SMP)
  if ( queue != NULL ) {
    _Assert( queue_context->Lock_context.Wait.queue == queue );
#endif

    ( *the_thread->Wait.operations->extract )(
      queue,
      the_thread,
      queue_context
    );
    _Thread_Wait_restore_default( the_thread );

#if defined(RTEMS_SMP)
    _Assert( queue_context->Lock_context.Wait.queue == NULL );
    /* Remember the queue for the caller's release. */
    queue_context->Lock_context.Wait.queue = queue;
  }
#endif
}
1620
1621/**
1622 * @brief The initial thread wait flags value set by _Thread_Initialize().
1623 */
1624#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1625
1626/**
1627 * @brief Mask to get the thread wait state flags.
1628 */
1629#define THREAD_WAIT_STATE_MASK 0xffU
1630
1631/**
1632 * @brief Indicates that the thread begins with the blocking operation.
1633 *
1634 * A blocking operation consists of an optional watchdog initialization and the
1635 * setting of the appropriate thread blocking state with the corresponding
1636 * scheduler block operation.
1637 */
1638#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1639
1640/**
1641 * @brief Indicates that the thread completed the blocking operation.
1642 */
1643#define THREAD_WAIT_STATE_BLOCKED 0x2U
1644
1645/**
1646 * @brief Indicates that a condition to end the thread wait occurred.
1647 *
1648 * This could be a timeout, a signal, an event or a resource availability.
1649 */
1650#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1651
1652/**
1653 * @brief Mask to get the thread wait class flags.
1654 */
1655#define THREAD_WAIT_CLASS_MASK 0xff00U
1656
1657/**
1658 * @brief Indicates that the thread waits for an event.
1659 */
1660#define THREAD_WAIT_CLASS_EVENT 0x100U
1661
1662/**
1663 * @brief Indicates that the thread waits for a system event.
1664 */
1665#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1666
1667/**
1668 * @brief Indicates that the thread waits for an object.
1669 */
1670#define THREAD_WAIT_CLASS_OBJECT 0x400U
1671
1672/**
1673 * @brief Indicates that the thread waits for a period.
1674 */
1675#define THREAD_WAIT_CLASS_PERIOD 0x800U
1676
/**
 * @brief Sets the thread wait flags (relaxed ordering on SMP).
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The new thread wait flags.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
1688
/**
 * @brief Returns the thread wait flags (relaxed ordering on SMP).
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
1699
/**
 * @brief Returns the thread wait flags with acquire ordering on SMP.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
#else
  return the_thread->Wait.flags;
#endif
}
1710
1711/**
1712 * @brief Tries to change the thread wait flags with release semantics in case
1713 * of success.
1714 *
1715 * Must be called inside a critical section (interrupts disabled).
1716 *
1717 * In case the wait flags are equal to the expected wait flags, then the wait
1718 * flags are set to the desired wait flags.
1719 *
1720 * @param[in] the_thread The thread.
1721 * @param[in] expected_flags The expected wait flags.
1722 * @param[in] desired_flags The desired wait flags.
1723 *
1724 * @retval true The wait flags were equal to the expected wait flags.
1725 * @retval false Otherwise.
1726 */
1727RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
1728  Thread_Control    *the_thread,
1729  Thread_Wait_flags  expected_flags,
1730  Thread_Wait_flags  desired_flags
1731)
1732{
1733  _Assert( _ISR_Get_level() != 0 );
1734
1735#if defined(RTEMS_SMP)
1736  return _Atomic_Compare_exchange_uint(
1737    &the_thread->Wait.flags,
1738    &expected_flags,
1739    desired_flags,
1740    ATOMIC_ORDER_RELEASE,
1741    ATOMIC_ORDER_RELAXED
1742  );
1743#else
1744  bool success = ( the_thread->Wait.flags == expected_flags );
1745
1746  if ( success ) {
1747    the_thread->Wait.flags = desired_flags;
1748  }
1749
1750  return success;
1751#endif
1752}
1753
1754/**
1755 * @brief Tries to change the thread wait flags with acquire semantics.
1756 *
1757 * In case the wait flags are equal to the expected wait flags, then the wait
1758 * flags are set to the desired wait flags.
1759 *
1760 * @param[in] the_thread The thread.
1761 * @param[in] expected_flags The expected wait flags.
1762 * @param[in] desired_flags The desired wait flags.
1763 *
1764 * @retval true The wait flags were equal to the expected wait flags.
1765 * @retval false Otherwise.
1766 */
1767RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
1768  Thread_Control    *the_thread,
1769  Thread_Wait_flags  expected_flags,
1770  Thread_Wait_flags  desired_flags
1771)
1772{
1773  bool success;
1774#if defined(RTEMS_SMP)
1775  return _Atomic_Compare_exchange_uint(
1776    &the_thread->Wait.flags,
1777    &expected_flags,
1778    desired_flags,
1779    ATOMIC_ORDER_ACQUIRE,
1780    ATOMIC_ORDER_ACQUIRE
1781  );
1782#else
1783  ISR_Level level;
1784
1785  _ISR_Local_disable( level );
1786
1787  success = _Thread_Wait_flags_try_change_release(
1788    the_thread,
1789    expected_flags,
1790    desired_flags
1791  );
1792
1793  _ISR_Local_enable( level );
1794#endif
1795
1796  return success;
1797}
1798
1799/**
1800 * @brief Returns the object identifier of the object containing the current
1801 * thread wait queue.
1802 *
1803 * This function may be used for debug and system information purposes.  The
1804 * caller must be the owner of the thread lock.
1805 *
1806 * @retval 0 The thread waits on no thread queue currently, the thread wait
1807 *   queue is not contained in an object, or the current thread state provides
1808 *   insufficient information, e.g. the thread is in the middle of a blocking
1809 *   operation.
1810 * @retval other The object identifier of the object containing the thread wait
1811 *   queue.
1812 */
1813Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1814
/**
 * @brief Returns the thread wait return code as a status code.
 *
 * @param[in] the_thread The thread.
 *
 * @return The status stored in Wait.return_code.
 */
RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
  const Thread_Control *the_thread
)
{
  return (Status_Control) the_thread->Wait.return_code;
}
1821
1822/**
1823 * @brief General purpose thread wait timeout.
1824 *
1825 * @param[in] watchdog The thread timer watchdog.
1826 */
1827void _Thread_Timeout( Watchdog_Control *watchdog );
1828
/**
 * @brief Initializes the thread timer information.
 *
 * The timer initially targets the relative watchdog header of the cpu.
 *
 * @param[in] timer The thread timer information.
 * @param[in] cpu The processor providing the watchdog headers.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
1838
/**
 * @brief Inserts the thread timer into the relative (ticks based) watchdog
 * header of the specified processor.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine invoked on expiry.
 * @param[in] ticks The watchdog interval in ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  Watchdog_Interval               ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  /* Record which header was used so _Thread_Timer_remove() can find it. */
  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1857
/**
 * @brief Inserts the thread timer into the absolute watchdog header of the
 * specified processor.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine invoked on expiry.
 * @param[in] expire The absolute expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  /* Record which header was used so _Thread_Timer_remove() can find it. */
  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1876
/**
 * @brief Removes the thread timer from its watchdog header.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    /* On SMP the watchdog remembers the processor it was inserted on. */
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1895
/**
 * @brief Tranquilizes the thread, removes its timer, and unblocks it.
 *
 * @param[in] the_thread The thread to unblock.
 * @param[in] queue The thread queue; only used on multiprocessing
 *   configurations to unblock a proxy for a remote thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    /* Remote thread: unblock via its proxy on the thread queue. */
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
1915
1916/** @}*/
1917
1918#ifdef __cplusplus
1919}
1920#endif
1921
1922#if defined(RTEMS_MULTIPROCESSING)
1923#include <rtems/score/threadmp.h>
1924#endif
1925
1926#endif
1927/* end of include file */