source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 6771359f

5
Last change on this file since 6771359f was 6771359f, checked in by Sebastian Huber <sebastian.huber@…>, on Oct 27, 2016 at 4:42:06 AM

score: Second part of new MrsP implementation

Update #2556.

  • Property mode set to 100644
File size: 50.8 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2016 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/schedulernodeimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/status.h>
33#include <rtems/score/sysstate.h>
34#include <rtems/score/threadqimpl.h>
35#include <rtems/score/todimpl.h>
36#include <rtems/score/freechain.h>
37#include <rtems/score/watchdogimpl.h>
38#include <rtems/config.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
44/**
45 * @addtogroup ScoreThread
46 */
47/**@{**/
48
/**
 *  The following constant defines the status code which indicates that a
 *  proxy is blocking while waiting for a resource.
 */
53#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
54
55/**
56 *  Self for the GNU Ada Run-Time
57 */
58extern void *rtems_ada_self;
59
60typedef struct {
61  Objects_Information Objects;
62
63  Freechain_Control Free_thread_queue_heads;
64} Thread_Information;
65
66/**
67 *  The following defines the information control block used to
68 *  manage this class of objects.
69 */
70extern Thread_Information _Thread_Internal_information;
71
72/**
73 *  The following points to the thread whose floating point
74 *  context is currently loaded.
75 */
76#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
77extern Thread_Control *_Thread_Allocated_fp;
78#endif
79
80#if defined(RTEMS_SMP)
81#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
82  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
83#endif
84
85typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
86
87void _Thread_Iterate(
88  Thread_Visitor  visitor,
89  void           *arg
90);
91
92void _Thread_Initialize_information(
93  Thread_Information  *information,
94  Objects_APIs         the_api,
95  uint16_t             the_class,
96  uint32_t             maximum,
97  bool                 is_string,
98  uint32_t             maximum_name_length
99);
100
101/**
102 *  @brief Initialize thread handler.
103 *
104 *  This routine performs the initialization necessary for this handler.
105 */
106void _Thread_Handler_initialization(void);
107
108/**
109 *  @brief Create idle thread.
110 *
111 *  This routine creates the idle thread.
112 *
113 *  @warning No thread should be created before this one.
114 */
115void _Thread_Create_idle(void);
116
117/**
118 *  @brief Start thread multitasking.
119 *
120 *  This routine initiates multitasking.  It is invoked only as
121 *  part of initialization and its invocation is the last act of
122 *  the non-multitasking part of the system initialization.
123 */
124void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
125
126/**
127 *  @brief Allocate the requested stack space for the thread.
128 *
129 *  Allocate the requested stack space for the thread.
130 *  Set the Start.stack field to the address of the stack.
131 *
132 *  @param[in] the_thread is the thread where the stack space is requested
133 *  @param[in] stack_size is the stack space is requested
134 *
135 *  @retval actual size allocated after any adjustment
136 *  @retval zero if the allocation failed
137 */
138size_t _Thread_Stack_Allocate(
139  Thread_Control *the_thread,
140  size_t          stack_size
141);
142
143/**
144 *  @brief Deallocate thread stack.
145 *
146 *  Deallocate the Thread's stack.
147 */
148void _Thread_Stack_Free(
149  Thread_Control *the_thread
150);
151
152/**
153 *  @brief Initialize thread.
154 *
155 *  This routine initializes the specified the thread.  It allocates
156 *  all memory associated with this thread.  It completes by adding
157 *  the thread to the local object table so operations on this
158 *  thread id are allowed.
159 *
160 *  @note If stack_area is NULL, it is allocated from the workspace.
161 *
162 *  @note If the stack is allocated from the workspace, then it is
163 *        guaranteed to be of at least minimum size.
164 */
165bool _Thread_Initialize(
166  Thread_Information                   *information,
167  Thread_Control                       *the_thread,
168  const struct Scheduler_Control       *scheduler,
169  void                                 *stack_area,
170  size_t                                stack_size,
171  bool                                  is_fp,
172  Priority_Control                      priority,
173  bool                                  is_preemptible,
174  Thread_CPU_budget_algorithms          budget_algorithm,
175  Thread_CPU_budget_algorithm_callout   budget_callout,
176  uint32_t                              isr_level,
177  Objects_Name                          name
178);
179
180/**
181 *  @brief Initializes thread and executes it.
182 *
183 *  This routine initializes the executable information for a thread
184 *  and makes it ready to execute.  After this routine executes, the
185 *  thread competes with all other threads for CPU time.
186 *
187 *  @param the_thread The thread to be started.
188 *  @param entry The thread entry information.
189 */
190bool _Thread_Start(
191  Thread_Control                 *the_thread,
192  const Thread_Entry_information *entry,
193  ISR_lock_Context               *lock_context
194);
195
196void _Thread_Restart_self(
197  Thread_Control                 *executing,
198  const Thread_Entry_information *entry,
199  ISR_lock_Context               *lock_context
200) RTEMS_NO_RETURN;
201
202bool _Thread_Restart_other(
203  Thread_Control                 *the_thread,
204  const Thread_Entry_information *entry,
205  ISR_lock_Context               *lock_context
206);
207
208void _Thread_Yield( Thread_Control *executing );
209
210Thread_Life_state _Thread_Change_life(
211  Thread_Life_state clear,
212  Thread_Life_state set,
213  Thread_Life_state ignore
214);
215
216Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
217
218/**
219 * @brief Kills all zombie threads in the system.
220 *
221 * Threads change into the zombie state as the last step in the thread
222 * termination sequence right before a context switch to the heir thread is
223 * initiated.  Since the thread stack is still in use during this phase we have
224 * to postpone the thread stack reclamation until this point.  On SMP
225 * configurations we may have to busy wait for context switch completion here.
226 */
227void _Thread_Kill_zombies( void );
228
229void _Thread_Exit(
230  Thread_Control    *executing,
231  Thread_Life_state  set,
232  void              *exit_value
233);
234
235void _Thread_Join(
236  Thread_Control       *the_thread,
237  States_Control        waiting_for_join,
238  Thread_Control       *executing,
239  Thread_queue_Context *queue_context
240);
241
242void _Thread_Cancel(
243  Thread_Control *the_thread,
244  Thread_Control *executing,
245  void           *exit_value
246);
247
248/**
249 * @brief Closes the thread.
250 *
251 * Closes the thread object and starts the thread termination sequence.  In
252 * case the executing thread is not terminated, then this function waits until
253 * the terminating thread reached the zombie state.
254 */
255void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
256
257RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
258{
259  return _States_Is_ready( the_thread->current_state );
260}
261
262States_Control _Thread_Clear_state_locked(
263  Thread_Control *the_thread,
264  States_Control  state
265);
266
267/**
268 * @brief Clears the specified thread state.
269 *
270 * In case the previous state is a non-ready state and the next state is the
271 * ready state, then the thread is unblocked by the scheduler.
272 *
273 * @param[in] the_thread The thread.
274 * @param[in] state The state to clear.  It must not be zero.
275 *
276 * @return The previous state.
277 */
278States_Control _Thread_Clear_state(
279  Thread_Control *the_thread,
280  States_Control  state
281);
282
283States_Control _Thread_Set_state_locked(
284  Thread_Control *the_thread,
285  States_Control  state
286);
287
288/**
289 * @brief Sets the specified thread state.
290 *
291 * In case the previous state is the ready state, then the thread is blocked by
292 * the scheduler.
293 *
294 * @param[in] the_thread The thread.
295 * @param[in] state The state to set.  It must not be zero.
296 *
297 * @return The previous state.
298 */
299States_Control _Thread_Set_state(
300  Thread_Control *the_thread,
301  States_Control  state
302);
303
/**
 *  @brief Initializes the environment for a thread.
 *
 *  This routine initializes the context of @a the_thread to its
 *  appropriate starting state.
 *
 *  @param[in] the_thread is the pointer to the thread control block.
 */
312void _Thread_Load_environment(
313  Thread_Control *the_thread
314);
315
316void _Thread_Entry_adaptor_idle( Thread_Control *executing );
317
318void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
319
320void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
321
322/**
323 *  @brief Wrapper function for all threads.
324 *
325 *  This routine is the wrapper function for all threads.  It is
326 *  the starting point for all threads.  The user provided thread
327 *  entry point is invoked by this routine.  Operations
328 *  which must be performed immediately before and after the user's
329 *  thread executes are found here.
330 *
 *  @note On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
336 */
337void _Thread_Handler( void );
338
339/**
340 * @brief Executes the global constructors and then restarts itself as the
341 * first initialization thread.
342 *
343 * The first initialization thread is the first RTEMS initialization task or
344 * the first POSIX initialization thread in case no RTEMS initialization tasks
345 * are present.
346 */
347void _Thread_Global_construction(
348  Thread_Control                 *executing,
349  const Thread_Entry_information *entry
350) RTEMS_NO_RETURN;
351
352/**
353 *  @brief Ended the delay of a thread.
354 *
355 *  This routine is invoked when a thread must be unblocked at the
356 *  end of a time based delay (i.e. wake after or wake when).
357 *  It is called by the watchdog handler.
358 *
359 *  @param[in] id is the thread id
360 *  @param[in] ignored is not used
361 */
362void _Thread_Delay_ended(
363  Objects_Id  id,
364  void       *ignored
365);
366
/**
 * @brief Acquires the thread state lock with interrupts already disabled.
 *
 * The thread state lock is provided by the thread join queue lock.
 *
 * @param[in] the_thread The thread to lock.
 * @param[in] lock_context The lock context for the corresponding release.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
}

/**
 * @brief Disables interrupts and acquires the thread state lock.
 *
 * @param[in] the_thread The thread to lock.
 * @param[in] lock_context The lock context for the corresponding release.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_State_acquire_critical( the_thread, lock_context );
}
383
384RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
385  ISR_lock_Context *lock_context
386)
387{
388  Thread_Control *executing;
389
390  _ISR_lock_ISR_disable( lock_context );
391  executing = _Thread_Executing;
392  _Thread_State_acquire_critical( executing, lock_context );
393
394  return executing;
395}
396
/**
 * @brief Releases the thread state lock without enabling interrupts.
 *
 * @param[in] the_thread The thread to unlock.
 * @param[in] lock_context The lock context used for the corresponding acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
}

/**
 * @brief Releases the thread state lock and restores the interrupt status.
 *
 * @param[in] the_thread The thread to unlock.
 * @param[in] lock_context The lock context used for the corresponding acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_State_release(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_State_release_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}

/* Debug-only helper to assert ownership of the thread state lock */
#if defined(RTEMS_DEBUG)
RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
  const Thread_Control *the_thread
)
{
  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
}
#endif
422
423/**
424 * @brief Performs the priority actions specified by the thread queue context
425 * along the thread queue path.
426 *
427 * The caller must be the owner of the thread wait lock.
428 *
429 * @param start_of_path The start thread of the thread queue path.
430 * @param queue_context The thread queue context specifying the thread queue
431 *   path and initial thread priority actions.
432 *
433 * @see _Thread_queue_Path_acquire_critical().
434 */
435void _Thread_Priority_perform_actions(
436  Thread_Control       *start_of_path,
437  Thread_queue_Context *queue_context
438);
439
440/**
441 * @brief Adds the specified thread priority node to the corresponding thread
442 * priority aggregation.
443 *
444 * The caller must be the owner of the thread wait lock.
445 *
446 * @param the_thread The thread.
447 * @param priority_node The thread priority node to add.
448 * @param queue_context The thread queue context to return an updated set of
449 *   threads for _Thread_Priority_update().  The thread queue context must be
450 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
451 *   call of this function.
452 *
453 * @see _Thread_Wait_acquire().
454 */
455void _Thread_Priority_add(
456  Thread_Control       *the_thread,
457  Priority_Node        *priority_node,
458  Thread_queue_Context *queue_context
459);
460
461/**
462 * @brief Removes the specified thread priority node from the corresponding
463 * thread priority aggregation.
464 *
465 * The caller must be the owner of the thread wait lock.
466 *
467 * @param the_thread The thread.
468 * @param priority_node The thread priority node to remove.
469 * @param queue_context The thread queue context to return an updated set of
470 *   threads for _Thread_Priority_update().  The thread queue context must be
471 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
472 *   call of this function.
473 *
474 * @see _Thread_Wait_acquire().
475 */
476void _Thread_Priority_remove(
477  Thread_Control       *the_thread,
478  Priority_Node        *priority_node,
479  Thread_queue_Context *queue_context
480);
481
482/**
483 * @brief Propagates a thread priority value change in the specified thread
484 * priority node to the corresponding thread priority aggregation.
485 *
486 * The caller must be the owner of the thread wait lock.
487 *
488 * @param the_thread The thread.
489 * @param priority_node The thread priority node to change.
490 * @param prepend_it In case this is true, then the thread is prepended to
491 *   its priority group in its home scheduler instance, otherwise it is
492 *   appended.
493 * @param queue_context The thread queue context to return an updated set of
494 *   threads for _Thread_Priority_update().  The thread queue context must be
495 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
496 *   call of this function.
497 *
498 * @see _Thread_Wait_acquire().
499 */
500void _Thread_Priority_changed(
501  Thread_Control       *the_thread,
502  Priority_Node        *priority_node,
503  bool                  prepend_it,
504  Thread_queue_Context *queue_context
505);
506
507/**
508 * @brief Changes the thread priority value of the specified thread priority
509 * node in the corresponding thread priority aggregation.
510 *
511 * The caller must be the owner of the thread wait lock.
512 *
513 * @param the_thread The thread.
514 * @param priority_node The thread priority node to change.
515 * @param new_priority The new thread priority value of the thread priority
516 *   node to change.
517 * @param prepend_it In case this is true, then the thread is prepended to
518 *   its priority group in its home scheduler instance, otherwise it is
519 *   appended.
520 * @param queue_context The thread queue context to return an updated set of
521 *   threads for _Thread_Priority_update().  The thread queue context must be
522 *   initialized via _Thread_queue_Context_clear_priority_updates() before a
523 *   call of this function.
524 *
525 * @see _Thread_Wait_acquire().
526 */
527RTEMS_INLINE_ROUTINE void _Thread_Priority_change(
528  Thread_Control       *the_thread,
529  Priority_Node        *priority_node,
530  Priority_Control      new_priority,
531  bool                  prepend_it,
532  Thread_queue_Context *queue_context
533)
534{
535  _Priority_Node_set_priority( priority_node, new_priority );
536  _Thread_Priority_changed(
537    the_thread,
538    priority_node,
539    prepend_it,
540    queue_context
541  );
542}
543
544/**
545 * @brief Replaces the victim priority node with the replacement priority node
546 * in the corresponding thread priority aggregation.
547 *
548 * The caller must be the owner of the thread wait lock.
549 *
550 * @param the_thread The thread.
551 * @param victim_node The victim thread priority node.
552 * @param replacement_node The replacement thread priority node.
553 *
554 * @see _Thread_Wait_acquire().
555 */
556void _Thread_Priority_replace(
557  Thread_Control *the_thread,
558  Priority_Node  *victim_node,
559  Priority_Node  *replacement_node
560);
561
/**
 * @brief Updates the priority of all threads in the set returned by previous
 * thread priority operations.
 *
 * @param queue_context The thread queue context carrying the set of threads
 *   to update, as produced by _Thread_Priority_add(),
 *   _Thread_Priority_change(), _Thread_Priority_changed() and
 *   _Thread_Priority_remove().
 */
578void _Thread_Priority_update( Thread_queue_Context *queue_context );
579
580#if defined(RTEMS_SMP)
581void _Thread_Priority_and_sticky_update(
582  Thread_Control *the_thread,
583  int             sticky_level_change
584);
585#endif
586
587/**
588 * @brief Returns true if the left thread priority is less than the right
589 * thread priority in the intuitive sense of priority and false otherwise.
590 */
591RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
592  Priority_Control left,
593  Priority_Control right
594)
595{
596  return left > right;
597}
598
599/**
600 * @brief Returns the highest priority of the left and right thread priorities
601 * in the intuitive sense of priority.
602 */
603RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
604  Priority_Control left,
605  Priority_Control right
606)
607{
608  return _Thread_Priority_less_than( left, right ) ? right : left;
609}
610
611RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
612  Objects_Id id
613)
614{
615  uint32_t the_api;
616
617  the_api = _Objects_Get_API( id );
618
619  if ( !_Objects_Is_api_valid( the_api ) ) {
620    return NULL;
621  }
622
623  /*
624   * Threads are always first class :)
625   *
626   * There is no need to validate the object class of the object identifier,
627   * since this will be done by the object get methods.
628   */
629  return _Objects_Information_table[ the_api ][ 1 ];
630}
631
632/**
633 * @brief Gets a thread by its identifier.
634 *
635 * @see _Objects_Get().
636 */
637Thread_Control *_Thread_Get(
638  Objects_Id         id,
639  ISR_lock_Context  *lock_context
640);
641
/**
 * @brief Returns the processor of the thread.
 *
 * On SMP configurations this is the processor stored in the scheduler
 * context of the thread; on uniprocessor configurations it is the one and
 * only processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

/**
 * @brief Sets the processor of the thread.
 *
 * On uniprocessor configurations this is a no-op.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
667
/**
 * @brief Returns true if the_thread is the currently executing thread of this
 * processor, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Executing );
}

#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif

/**
 * @brief Returns true if the_thread is the heir thread of this processor, and
 * false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Heir );
}
707
/**
 * @brief Clears any blocking state of the_thread.
 *
 * This routine performs any necessary scheduling operations including the
 * selection of a new heir thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}

/**
 * @brief Returns true if the floating point context of the_thread is
 * currently loaded in the floating point unit, and false otherwise.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif
735
/*
 *  If the CPU has hardware floating point, then we must address saving
 *  and restoring it as part of the context switch.
 *
 *  The second conditional compilation section selects the algorithm used
 *  to context switch between floating point tasks.  The deferred algorithm
 *  can be significantly better in a system with few floating point tasks
 *  because it reduces the total number of save and restore FP context
 *  operations.  However, this algorithm can not be used on all CPUs due
 *  to unpredictable use of FP registers by some compilers for integer
 *  operations.
 */

/**
 * @brief Saves the floating point context of the executing thread, if any.
 *
 * With the deferred FP switch algorithm the save is postponed until another
 * FP thread actually needs the unit, hence this is a no-op in that
 * configuration.
 */
RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}

/**
 * @brief Restores the floating point context of the executing thread, if any.
 *
 * With the deferred FP switch algorithm the context of the previous owner of
 * the floating point unit is saved first and the unit is handed over to the
 * executing thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}
776
/**
 * @brief Invoked when the currently loaded floating point context is no
 * longer associated with an active thread.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif

/**
 * @brief Returns true if a thread dispatch is necessary on this processor,
 * and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
798
/**
 * @brief Returns true if the_thread is NULL and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
  const Thread_Control *the_thread
)
{
  return ( the_thread == NULL );
}

/**
 * @brief Returns true if the status code indicates that a proxy is blocking,
 * and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}
821
822RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
823{
824  /* Idle threads */
825  uint32_t maximum_internal_threads =
826    rtems_configuration_get_maximum_processors();
827
828  /* MPCI thread */
829#if defined(RTEMS_MULTIPROCESSING)
830  if ( _System_state_Is_multiprocessing ) {
831    ++maximum_internal_threads;
832  }
833#endif
834
835  return maximum_internal_threads;
836}
837
/**
 * @brief Allocates an internal thread object and returns it, or NULL if none
 * is available.
 *
 * NOTE(review): this uses the unprotected object allocator — presumably the
 * caller must provide the allocator protection; confirm against callers.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}
843
844/**
845 * @brief Gets the heir of the processor and makes it executing.
846 *
847 * Must be called with interrupts disabled.  The thread dispatch necessary
848 * indicator is cleared as a side-effect.
849 *
850 * @return The heir thread.
851 *
852 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
853 * _Thread_Dispatch_update_heir().
854 */
855RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
856  Per_CPU_Control *cpu_self
857)
858{
859  Thread_Control *heir;
860
861  heir = cpu_self->heir;
862  cpu_self->dispatch_necessary = false;
863  cpu_self->executing = heir;
864
865  return heir;
866}
867
/**
 * @brief Adds the CPU time consumed on the processor since the last usage
 * timestamp to the CPU time used of the thread.
 *
 * The per-CPU usage timestamp is advanced to the current uptime as a
 * side-effect.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  /* Capture the previous timestamp before it is overwritten below */
  last = cpu->cpu_usage_timestamp;
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}
881
#if defined( RTEMS_SMP )
/**
 * @brief Updates the heir thread of the processor and requests a thread
 * dispatch for it.
 *
 * The CPU time used of the previous heir is updated before it is replaced.
 *
 * @param[in] cpu_self The processor of the caller.
 * @param[in] cpu_for_heir The processor to update the heir of.
 * @param[in] heir The new heir thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
}
#endif
896
897void _Thread_Get_CPU_time_used(
898  Thread_Control    *the_thread,
899  Timestamp_Control *cpu_time_used
900);
901
/**
 * @brief Initializes the thread action control as an empty action chain.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}

/**
 * @brief Initializes the thread action as off-chain, i.e. not pending.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}
915
/**
 * @brief Adds a post-switch action to the thread and requests a thread
 * dispatch on its processor.
 *
 * The caller must own the thread state lock.  The action is appended only if
 * it is not already pending on the action chain.
 *
 * @param[in] the_thread The thread to add the action to.
 * @param[in] action The action control.
 * @param[in] handler The handler to invoke with the action.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *the_thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;

  _Assert( _Thread_State_is_owner( the_thread ) );

  cpu_of_thread = _Thread_Get_CPU( the_thread );

  action->handler = handler;

  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );

  _Chain_Append_if_is_off_chain_unprotected(
    &the_thread->Post_switch_actions.Chain,
    &action->Node
  );
}
937
/**
 * @brief Returns true if the life state indicates a pending restart.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

/**
 * @brief Returns true if the life state indicates a pending termination.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

/**
 * @brief Returns true if life changes are currently allowed, i.e. the life
 * state is neither protected nor deferred.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
  Thread_Life_state life_state
)
{
  return ( life_state
    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
}

/**
 * @brief Returns true if the life state indicates a pending restart or
 * termination.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state
    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
}

/**
 * @brief Returns true if the thread is joinable, i.e. not detached.
 *
 * The caller must own the thread state lock.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
  const Thread_Control *the_thread
)
{
  _Assert( _Thread_State_is_owner( the_thread ) );
  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
}
975
/**
 * @brief Returns true if the thread owns resources, and false otherwise.
 *
 * Resources are accounted with the Thread_Control::resource_count resource
 * counter.  This counter is used by mutex objects for example.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
  const Thread_Control *the_thread
)
{
  return the_thread->resource_count != 0;
}
990
#if defined(RTEMS_SMP)
/**
 * @brief Cancels a pending scheduler help request of the thread on the
 * specified processor.
 *
 * The help node is extracted from the per-processor help chain, if enqueued,
 * under protection of the per-processor lock.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  _Per_CPU_Acquire( cpu );

  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
  }

  _Per_CPU_Release( cpu );
}
#endif
1007
/**
 * @brief Returns the scheduler node of the thread in its own (home)
 * scheduler instance.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_own_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_node;
#else
  return the_thread->Scheduler.nodes;
#endif
}

/**
 * @brief Returns the home scheduler node of the thread.
 *
 * On SMP configurations this is the first node of the thread wait nodes
 * chain, which must not be empty.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
    _Chain_First( &the_thread->Scheduler.Wait_nodes )
  );
#else
  return the_thread->Scheduler.nodes;
#endif
}

/**
 * @brief Returns the scheduler node of the thread for the scheduler instance
 * with the specified index.
 *
 * On SMP configurations the nodes are stored in one contiguous area indexed
 * by scheduler instance; on uniprocessor configurations only index zero is
 * valid.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
  const Thread_Control *the_thread,
  size_t                scheduler_index
)
{
#if defined(RTEMS_SMP)
  return (Scheduler_Node *)
    ( (uintptr_t) the_thread->Scheduler.nodes
      + scheduler_index * _Scheduler_Node_size );
#else
  _Assert( scheduler_index == 0 );
  (void) scheduler_index;
  return the_thread->Scheduler.nodes;
#endif
}
1048
1049#if defined(RTEMS_SMP)
/**
 * @brief Acquires the thread scheduler lock; interrupts must be disabled.
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_acquire_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Acquire( &the_thread->Scheduler.Lock, lock_context );
}

/**
 * @brief Releases the thread scheduler lock acquired via
 * _Thread_Scheduler_acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
}
1065
1066#if defined(RTEMS_SMP)
1067void _Thread_Scheduler_ask_for_help( Thread_Control *the_thread );
1068
1069void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
1070#endif
1071
1072RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
1073  Thread_Control         *the_thread,
1074  Scheduler_Node         *scheduler_node,
1075  Scheduler_Node_request  request
1076)
1077{
1078  ISR_lock_Context       lock_context;
1079  Scheduler_Node_request current_request;
1080
1081  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );
1082
1083  current_request = scheduler_node->Thread.request;
1084
1085  if ( current_request == SCHEDULER_NODE_REQUEST_NOT_PENDING ) {
1086    _Assert(
1087      request == SCHEDULER_NODE_REQUEST_ADD
1088        || request == SCHEDULER_NODE_REQUEST_REMOVE
1089    );
1090    _Assert( scheduler_node->Thread.next_request == NULL );
1091    scheduler_node->Thread.next_request = the_thread->Scheduler.requests;
1092    the_thread->Scheduler.requests = scheduler_node;
1093  } else if ( current_request != SCHEDULER_NODE_REQUEST_NOTHING ) {
1094    _Assert(
1095      ( current_request == SCHEDULER_NODE_REQUEST_ADD
1096        && request == SCHEDULER_NODE_REQUEST_REMOVE )
1097      || ( current_request == SCHEDULER_NODE_REQUEST_REMOVE
1098        && request == SCHEDULER_NODE_REQUEST_ADD )
1099    );
1100    request = SCHEDULER_NODE_REQUEST_NOTHING;
1101  }
1102
1103  scheduler_node->Thread.request = request;
1104
1105  _Thread_Scheduler_release_critical( the_thread, &lock_context );
1106}
1107
/**
 * @brief Appends the scheduler node to the thread wait nodes and registers a
 * corresponding add request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to add.
 *
 * @see _Thread_Scheduler_remove_wait_node().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Append_unprotected(
    &the_thread->Scheduler.Wait_nodes,
    &scheduler_node->Thread.Wait_node
  );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_ADD
  );
}
1123
/**
 * @brief Extracts the scheduler node from the thread wait nodes and registers
 * a corresponding remove request.
 *
 * @param[in] the_thread The thread.
 * @param[in] scheduler_node The scheduler node to remove.
 *
 * @see _Thread_Scheduler_add_wait_node().
 */
RTEMS_INLINE_ROUTINE void _Thread_Scheduler_remove_wait_node(
  Thread_Control *the_thread,
  Scheduler_Node *scheduler_node
)
{
  _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
  _Thread_Scheduler_add_request(
    the_thread,
    scheduler_node,
    SCHEDULER_NODE_REQUEST_REMOVE
  );
}
1136#endif
1137
1138/**
1139 * @brief Returns the priority of the thread.
1140 *
1141 * Returns the user API and thread wait information relevant thread priority.
1142 * This includes temporary thread priority adjustments due to locking
1143 * protocols, a job release or the POSIX sporadic server for example.
1144 *
1145 * @return The priority of the thread.
1146 */
1147RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
1148  const Thread_Control *the_thread
1149)
1150{
1151  Scheduler_Node *scheduler_node;
1152
1153  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
1154  return _Priority_Get_priority( &scheduler_node->Wait.Priority );
1155}
1156
1157/**
1158 * @brief Acquires the thread wait default lock inside a critical section
1159 * (interrupts disabled).
1160 *
1161 * @param[in] the_thread The thread.
1162 * @param[in] lock_context The lock context used for the corresponding lock
1163 *   release.
1164 *
1165 * @see _Thread_Wait_release_default_critical().
1166 */
1167RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
1168  Thread_Control   *the_thread,
1169  ISR_lock_Context *lock_context
1170)
1171{
1172  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
1173}
1174
1175/**
1176 * @brief Acquires the thread wait default lock and returns the executing
1177 * thread.
1178 *
1179 * @param[in] lock_context The lock context used for the corresponding lock
1180 *   release.
1181 *
1182 * @return The executing thread.
1183 *
1184 * @see _Thread_Wait_release_default().
1185 */
1186RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
1187  ISR_lock_Context *lock_context
1188)
1189{
1190  Thread_Control *executing;
1191
1192  _ISR_lock_ISR_disable( lock_context );
1193  executing = _Thread_Executing;
1194  _Thread_Wait_acquire_default_critical( executing, lock_context );
1195
1196  return executing;
1197}
1198
1199/**
1200 * @brief Acquires the thread wait default lock and disables interrupts.
1201 *
1202 * @param[in] the_thread The thread.
1203 * @param[in] lock_context The lock context used for the corresponding lock
1204 *   release.
1205 *
1206 * @see _Thread_Wait_release_default().
1207 */
1208RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
1209  Thread_Control   *the_thread,
1210  ISR_lock_Context *lock_context
1211)
1212{
1213  _ISR_lock_ISR_disable( lock_context );
1214  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
1215}
1216
1217/**
1218 * @brief Releases the thread wait default lock inside a critical section
1219 * (interrupts disabled).
1220 *
1221 * The previous interrupt status is not restored.
1222 *
1223 * @param[in] the_thread The thread.
1224 * @param[in] lock_context The lock context used for the corresponding lock
1225 *   acquire.
1226 */
1227RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
1228  Thread_Control   *the_thread,
1229  ISR_lock_Context *lock_context
1230)
1231{
1232  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1233}
1234
1235/**
1236 * @brief Releases the thread wait default lock and restores the previous
1237 * interrupt status.
1238 *
1239 * @param[in] the_thread The thread.
1240 * @param[in] lock_context The lock context used for the corresponding lock
1241 *   acquire.
1242 */
1243RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
1244  Thread_Control   *the_thread,
1245  ISR_lock_Context *lock_context
1246)
1247{
1248  _Thread_Wait_release_default_critical( the_thread, lock_context );
1249  _ISR_lock_ISR_enable( lock_context );
1250}
1251
#if defined(RTEMS_SMP)
/**
 * @brief Maps a node of the pending request chain back to its enclosing
 * thread queue context.
 */
#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1255
1256RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1257  Thread_Control            *the_thread,
1258  Thread_queue_Lock_context *queue_lock_context
1259)
1260{
1261  Chain_Node *first;
1262
1263  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1264  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1265
1266  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1267    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1268  }
1269}
1270
/**
 * @brief Acquires the specified thread queue lock inside a critical section
 * (interrupts disabled).
 *
 * Lock statistics are attributed to the executing thread (Potpourri_stats).
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The lock context for the corresponding
 *   _Thread_Wait_release_queue_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_acquire_critical(
    queue,
    &_Thread_Executing->Potpourri_stats,
    &queue_lock_context->Lock_context
  );
}
1282
/**
 * @brief Releases the specified thread queue lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] queue The thread queue.
 * @param[in] queue_lock_context The lock context of the corresponding
 *   _Thread_Wait_acquire_queue_critical().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
  Thread_queue_Queue        *queue,
  Thread_queue_Lock_context *queue_lock_context
)
{
  _Thread_queue_Queue_release_critical(
    queue,
    &queue_lock_context->Lock_context
  );
}
1293#endif
1294
1295/**
1296 * @brief Acquires the thread wait lock inside a critical section (interrupts
1297 * disabled).
1298 *
1299 * @param[in] the_thread The thread.
1300 * @param[in] queue_context The thread queue context for the corresponding
1301 *   _Thread_Wait_release_critical().
1302 *
1303 * @see _Thread_queue_Context_initialize().
1304 */
1305RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
1306  Thread_Control       *the_thread,
1307  Thread_queue_Context *queue_context
1308)
1309{
1310#if defined(RTEMS_SMP)
1311  Thread_queue_Queue *queue;
1312
1313  _Thread_Wait_acquire_default_critical(
1314    the_thread,
1315    &queue_context->Lock_context.Lock_context
1316  );
1317
1318  queue = the_thread->Wait.queue;
1319  queue_context->Lock_context.Wait.queue = queue;
1320
1321  if ( queue != NULL ) {
1322    _Thread_queue_Gate_add(
1323      &the_thread->Wait.Lock.Pending_requests,
1324      &queue_context->Lock_context.Wait.Gate
1325    );
1326    _Thread_Wait_release_default_critical(
1327      the_thread,
1328      &queue_context->Lock_context.Lock_context
1329    );
1330    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1331
1332    if ( queue_context->Lock_context.Wait.queue == NULL ) {
1333      _Thread_Wait_release_queue_critical(
1334        queue,
1335        &queue_context->Lock_context
1336      );
1337      _Thread_Wait_acquire_default_critical(
1338        the_thread,
1339        &queue_context->Lock_context.Lock_context
1340      );
1341      _Thread_Wait_remove_request_locked(
1342        the_thread,
1343        &queue_context->Lock_context
1344      );
1345      _Assert( the_thread->Wait.queue == NULL );
1346    }
1347  }
1348#else
1349  (void) the_thread;
1350  (void) queue_context;
1351#endif
1352}
1353
1354/**
1355 * @brief Acquires the thread wait default lock and disables interrupts.
1356 *
1357 * @param[in] the_thread The thread.
1358 * @param[in] queue_context The thread queue context for the corresponding
1359 *   _Thread_Wait_release().
1360 */
1361RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
1362  Thread_Control       *the_thread,
1363  Thread_queue_Context *queue_context
1364)
1365{
1366  _Thread_queue_Context_initialize( queue_context );
1367  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
1368  _Thread_Wait_acquire_critical( the_thread, queue_context );
1369}
1370
1371/**
1372 * @brief Releases the thread wait lock inside a critical section (interrupts
1373 * disabled).
1374 *
1375 * The previous interrupt status is not restored.
1376 *
1377 * @param[in] the_thread The thread.
1378 * @param[in] queue_context The thread queue context used for corresponding
1379 *   _Thread_Wait_acquire_critical().
1380 */
1381RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
1382  Thread_Control       *the_thread,
1383  Thread_queue_Context *queue_context
1384)
1385{
1386#if defined(RTEMS_SMP)
1387  Thread_queue_Queue *queue;
1388
1389  queue = queue_context->Lock_context.Wait.queue;
1390
1391  if ( queue != NULL ) {
1392    _Thread_Wait_release_queue_critical(
1393      queue, &queue_context->Lock_context
1394    );
1395    _Thread_Wait_acquire_default_critical(
1396      the_thread,
1397      &queue_context->Lock_context.Lock_context
1398    );
1399    _Thread_Wait_remove_request_locked(
1400      the_thread,
1401      &queue_context->Lock_context
1402    );
1403  }
1404
1405  _Thread_Wait_release_default_critical(
1406    the_thread,
1407    &queue_context->Lock_context.Lock_context
1408  );
1409#else
1410  (void) the_thread;
1411  (void) queue_context;
1412#endif
1413}
1414
1415/**
1416 * @brief Releases the thread wait lock and restores the previous interrupt
1417 * status.
1418 *
1419 * @param[in] the_thread The thread.
1420 * @param[in] queue_context The thread queue context used for corresponding
1421 *   _Thread_Wait_acquire().
1422 */
1423RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
1424  Thread_Control       *the_thread,
1425  Thread_queue_Context *queue_context
1426)
1427{
1428  _Thread_Wait_release_critical( the_thread, queue_context );
1429  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
1430}
1431
1432/**
1433 * @brief Claims the thread wait queue.
1434 *
1435 * The caller must not be the owner of the default thread wait lock.  The
1436 * caller must be the owner of the corresponding thread queue lock.  The
1437 * registration of the corresponding thread queue operations is deferred and
1438 * done after the deadlock detection.  This is crucial to support timeouts on
1439 * SMP configurations.
1440 *
1441 * @param[in] the_thread The thread.
1442 * @param[in] queue The new thread queue.
1443 *
1444 * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
1445 */
1446RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
1447  Thread_Control     *the_thread,
1448  Thread_queue_Queue *queue
1449)
1450{
1451  ISR_lock_Context lock_context;
1452
1453  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1454
1455  _Assert( the_thread->Wait.queue == NULL );
1456
1457#if defined(RTEMS_SMP)
1458  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
1459  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
1460  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
1461#endif
1462
1463  the_thread->Wait.queue = queue;
1464
1465  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1466}
1467
1468/**
1469 * @brief Finalizes the thread wait queue claim via registration of the
1470 * corresponding thread queue operations.
1471 *
1472 * @param[in] the_thread The thread.
1473 * @param[in] operations The corresponding thread queue operations.
1474 */
1475RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
1476  Thread_Control                *the_thread,
1477  const Thread_queue_Operations *operations
1478)
1479{
1480  the_thread->Wait.operations = operations;
1481}
1482
1483/**
1484 * @brief Removes a thread wait lock request.
1485 *
1486 * On SMP configurations, removes a thread wait lock request.
1487 *
1488 * On other configurations, this function does nothing.
1489 *
1490 * @param[in] the_thread The thread.
1491 * @param[in] queue_lock_context The thread queue lock context used for
1492 *   corresponding _Thread_Wait_acquire().
1493 */
1494RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
1495  Thread_Control            *the_thread,
1496  Thread_queue_Lock_context *queue_lock_context
1497)
1498{
1499#if defined(RTEMS_SMP)
1500  ISR_lock_Context lock_context;
1501
1502  _Thread_Wait_acquire_default( the_thread, &lock_context );
1503  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
1504  _Thread_Wait_release_default( the_thread, &lock_context );
1505#else
1506  (void) the_thread;
1507  (void) queue_lock_context;
1508#endif
1509}
1510
1511/**
1512 * @brief Restores the default thread wait queue and operations.
1513 *
1514 * The caller must be the owner of the current thread wait queue lock.
1515 *
1516 * On SMP configurations, the pending requests are updated to use the stale
1517 * thread queue operations.
1518 *
1519 * @param[in] the_thread The thread.
1520 *
1521 * @see _Thread_Wait_claim().
1522 */
1523RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
1524  Thread_Control *the_thread
1525)
1526{
1527#if defined(RTEMS_SMP)
1528  ISR_lock_Context  lock_context;
1529  Chain_Node       *node;
1530  const Chain_Node *tail;
1531
1532  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1533
1534  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1535  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
1536
1537  if ( node != tail ) {
1538    do {
1539      Thread_queue_Context *queue_context;
1540
1541      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
1542      queue_context->Lock_context.Wait.queue = NULL;
1543
1544      node = _Chain_Next( node );
1545    } while ( node != tail );
1546
1547    _Thread_queue_Gate_add(
1548      &the_thread->Wait.Lock.Pending_requests,
1549      &the_thread->Wait.Lock.Tranquilizer
1550    );
1551  } else {
1552    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
1553  }
1554#endif
1555
1556  the_thread->Wait.queue = NULL;
1557  the_thread->Wait.operations = &_Thread_queue_Operations_default;
1558
1559#if defined(RTEMS_SMP)
1560  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1561#endif
1562}
1563
1564/**
1565 * @brief Tranquilizes the thread after a wait on a thread queue.
1566 *
1567 * After the violent blocking procedure this function makes the thread calm and
1568 * peaceful again so that it can carry out its normal work.
1569 *
1570 * On SMP configurations, ensures that all pending thread wait lock requests
1571 * completed before the thread is able to begin a new thread wait procedure.
1572 *
1573 * On other configurations, this function does nothing.
1574 *
1575 * It must be called after a _Thread_Wait_claim() exactly once
1576 *  - after the corresponding thread queue lock was released, and
1577 *  - the default wait state is restored or some other processor is about to do
1578 *    this.
1579 *
1580 * @param[in] the_thread The thread.
1581 */
1582RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
1583  Thread_Control *the_thread
1584)
1585{
1586#if defined(RTEMS_SMP)
1587  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
1588#else
1589  (void) the_thread;
1590#endif
1591}
1592
1593/**
1594 * @brief Cancels a thread wait on a thread queue.
1595 *
1596 * @param[in] the_thread The thread.
1597 * @param[in] queue_context The thread queue context used for corresponding
1598 *   _Thread_Wait_acquire().
1599 */
1600RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
1601  Thread_Control       *the_thread,
1602  Thread_queue_Context *queue_context
1603)
1604{
1605  Thread_queue_Queue *queue;
1606
1607  queue = the_thread->Wait.queue;
1608
1609#if defined(RTEMS_SMP)
1610  if ( queue != NULL ) {
1611    _Assert( queue_context->Lock_context.Wait.queue == queue );
1612#endif
1613
1614    ( *the_thread->Wait.operations->extract )(
1615      queue,
1616      the_thread,
1617      queue_context
1618    );
1619    _Thread_Wait_restore_default( the_thread );
1620
1621#if defined(RTEMS_SMP)
1622    _Assert( queue_context->Lock_context.Wait.queue == NULL );
1623    queue_context->Lock_context.Wait.queue = queue;
1624  }
1625#endif
1626}
1627
1628/**
1629 * @brief The initial thread wait flags value set by _Thread_Initialize().
1630 */
1631#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1632
1633/**
1634 * @brief Mask to get the thread wait state flags.
1635 */
1636#define THREAD_WAIT_STATE_MASK 0xffU
1637
1638/**
1639 * @brief Indicates that the thread begins with the blocking operation.
1640 *
1641 * A blocking operation consists of an optional watchdog initialization and the
1642 * setting of the appropriate thread blocking state with the corresponding
1643 * scheduler block operation.
1644 */
1645#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1646
1647/**
1648 * @brief Indicates that the thread completed the blocking operation.
1649 */
1650#define THREAD_WAIT_STATE_BLOCKED 0x2U
1651
1652/**
1653 * @brief Indicates that a condition to end the thread wait occurred.
1654 *
1655 * This could be a timeout, a signal, an event or a resource availability.
1656 */
1657#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1658
1659/**
1660 * @brief Mask to get the thread wait class flags.
1661 */
1662#define THREAD_WAIT_CLASS_MASK 0xff00U
1663
1664/**
1665 * @brief Indicates that the thread waits for an event.
1666 */
1667#define THREAD_WAIT_CLASS_EVENT 0x100U
1668
1669/**
1670 * @brief Indicates that the thread waits for a system event.
1671 */
1672#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1673
1674/**
1675 * @brief Indicates that the thread waits for an object.
1676 */
1677#define THREAD_WAIT_CLASS_OBJECT 0x400U
1678
1679/**
1680 * @brief Indicates that the thread waits for a period.
1681 */
1682#define THREAD_WAIT_CLASS_PERIOD 0x800U
1683
/**
 * @brief Sets the thread wait flags.
 *
 * On SMP configurations, a relaxed atomic store is used since the flags may
 * be read concurrently by other processors.
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The new thread wait flags.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
1695
/**
 * @brief Returns the thread wait flags (relaxed load on SMP configurations).
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
1706
/**
 * @brief Returns the thread wait flags with acquire semantics on SMP
 * configurations.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get_acquire(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_ACQUIRE );
#else
  return the_thread->Wait.flags;
#endif
}
1717
1718/**
1719 * @brief Tries to change the thread wait flags with release semantics in case
1720 * of success.
1721 *
1722 * Must be called inside a critical section (interrupts disabled).
1723 *
1724 * In case the wait flags are equal to the expected wait flags, then the wait
1725 * flags are set to the desired wait flags.
1726 *
1727 * @param[in] the_thread The thread.
1728 * @param[in] expected_flags The expected wait flags.
1729 * @param[in] desired_flags The desired wait flags.
1730 *
1731 * @retval true The wait flags were equal to the expected wait flags.
1732 * @retval false Otherwise.
1733 */
1734RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
1735  Thread_Control    *the_thread,
1736  Thread_Wait_flags  expected_flags,
1737  Thread_Wait_flags  desired_flags
1738)
1739{
1740  _Assert( _ISR_Get_level() != 0 );
1741
1742#if defined(RTEMS_SMP)
1743  return _Atomic_Compare_exchange_uint(
1744    &the_thread->Wait.flags,
1745    &expected_flags,
1746    desired_flags,
1747    ATOMIC_ORDER_RELEASE,
1748    ATOMIC_ORDER_RELAXED
1749  );
1750#else
1751  bool success = ( the_thread->Wait.flags == expected_flags );
1752
1753  if ( success ) {
1754    the_thread->Wait.flags = desired_flags;
1755  }
1756
1757  return success;
1758#endif
1759}
1760
1761/**
1762 * @brief Tries to change the thread wait flags with acquire semantics.
1763 *
1764 * In case the wait flags are equal to the expected wait flags, then the wait
1765 * flags are set to the desired wait flags.
1766 *
1767 * @param[in] the_thread The thread.
1768 * @param[in] expected_flags The expected wait flags.
1769 * @param[in] desired_flags The desired wait flags.
1770 *
1771 * @retval true The wait flags were equal to the expected wait flags.
1772 * @retval false Otherwise.
1773 */
1774RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
1775  Thread_Control    *the_thread,
1776  Thread_Wait_flags  expected_flags,
1777  Thread_Wait_flags  desired_flags
1778)
1779{
1780  bool success;
1781#if defined(RTEMS_SMP)
1782  return _Atomic_Compare_exchange_uint(
1783    &the_thread->Wait.flags,
1784    &expected_flags,
1785    desired_flags,
1786    ATOMIC_ORDER_ACQUIRE,
1787    ATOMIC_ORDER_ACQUIRE
1788  );
1789#else
1790  ISR_Level level;
1791
1792  _ISR_Local_disable( level );
1793
1794  success = _Thread_Wait_flags_try_change_release(
1795    the_thread,
1796    expected_flags,
1797    desired_flags
1798  );
1799
1800  _ISR_Local_enable( level );
1801#endif
1802
1803  return success;
1804}
1805
1806/**
1807 * @brief Returns the object identifier of the object containing the current
1808 * thread wait queue.
1809 *
1810 * This function may be used for debug and system information purposes.  The
1811 * caller must be the owner of the thread lock.
1812 *
1813 * @retval 0 The thread waits on no thread queue currently, the thread wait
1814 *   queue is not contained in an object, or the current thread state provides
1815 *   insufficient information, e.g. the thread is in the middle of a blocking
1816 *   operation.
1817 * @retval other The object identifier of the object containing the thread wait
1818 *   queue.
1819 */
1820Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1821
/**
 * @brief Returns the thread wait return code as a status code.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait return code converted to a status code.
 */
RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
  const Thread_Control *the_thread
)
{
  return (Status_Control) the_thread->Wait.return_code;
}
1828
1829/**
1830 * @brief General purpose thread wait timeout.
1831 *
1832 * @param[in] watchdog The thread timer watchdog.
1833 */
1834void _Thread_Timeout( Watchdog_Control *watchdog );
1835
/**
 * @brief Initializes the thread timer information.
 *
 * The timer defaults to the relative (ticks based) watchdog header of the
 * specified processor.
 *
 * @param[in] timer The thread timer information to initialize.
 * @param[in] cpu The processor providing the watchdog header.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
1845
/**
 * @brief Inserts the thread timer as a relative (ticks based) watchdog on the
 * specified processor.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine to call on expiration.
 * @param[in] ticks The relative expiration interval in ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  Watchdog_Interval               ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  /* Switch to the relative watchdog header before arming the watchdog */
  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1864
/**
 * @brief Inserts the thread timer as an absolute watchdog on the specified
 * processor.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine to call on expiration.
 * @param[in] expire The absolute expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  /* Switch to the absolute watchdog header before arming the watchdog */
  the_thread->Timer.header =
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1883
/**
 * @brief Removes the thread timer watchdog, if armed.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    /* On SMP, the watchdog may be armed on another processor */
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1902
/**
 * @brief Removes the thread timer and unblocks the thread.
 *
 * On multiprocessing configurations, a remote thread is unblocked via its
 * proxy on the specified thread queue.
 *
 * @param[in] the_thread The thread to unblock.
 * @param[in] queue The thread queue (used for the proxy unblock only).
 */
RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue
)
{
  /* Wait until all pending thread wait lock requests completed */
  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    _Thread_Unblock( the_thread );
  } else {
    /* A remote thread is represented locally by a proxy */
    _Thread_queue_Unblock_proxy( queue, the_thread );
  }
#else
  (void) queue;
  _Thread_Unblock( the_thread );
#endif
}
1922
1923/** @}*/
1924
1925#ifdef __cplusplus
1926}
1927#endif
1928
1929#if defined(RTEMS_MULTIPROCESSING)
1930#include <rtems/score/threadmp.h>
1931#endif
1932
1933#endif
1934/* end of include file */
Note: See TracBrowser for help on using the repository browser.