source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ b20b736

Last change on this file since b20b736 was b20b736, checked in by Sebastian Huber <sebastian.huber@…>, on 06/28/16 at 04:54:50

score: Introduce _Thread_Get_priority()

Avoid direct access to thread internal data fields.

1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014, 2016 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/resourceimpl.h>
31#include <rtems/score/schedulernodeimpl.h>
32#include <rtems/score/statesimpl.h>
33#include <rtems/score/status.h>
34#include <rtems/score/sysstate.h>
35#include <rtems/score/threadqimpl.h>
36#include <rtems/score/todimpl.h>
37#include <rtems/score/freechain.h>
38#include <rtems/score/watchdogimpl.h>
39#include <rtems/config.h>
40
41#ifdef __cplusplus
42extern "C" {
43#endif
44
45/**
46 * @addtogroup ScoreThread
47 */
48/**@{**/
49
50/**
51 *  The following constant is the status code used to indicate that a
52 *  proxy is blocking while it waits for a resource.
53 */
54#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
55
56/**
57 *  Self for the GNU Ada Run-Time
58 */
59extern void *rtems_ada_self;
60
61typedef struct {
62  Objects_Information Objects;
63
64  Freechain_Control Free_thread_queue_heads;
65} Thread_Information;
66
67/**
68 *  The following defines the information control block used to
69 *  manage this class of objects.
70 */
71extern Thread_Information _Thread_Internal_information;
72
73/**
74 *  The following points to the thread whose floating point
75 *  context is currently loaded.
76 */
77#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
78extern Thread_Control *_Thread_Allocated_fp;
79#endif
80
81#if defined(RTEMS_SMP)
82#define THREAD_RESOURCE_NODE_TO_THREAD( node ) \
83  RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node )
84#endif
85
86void _Thread_Initialize_information(
87  Thread_Information  *information,
88  Objects_APIs         the_api,
89  uint16_t             the_class,
90  uint32_t             maximum,
91  bool                 is_string,
92  uint32_t             maximum_name_length
93);
94
95/**
96 *  @brief Initialize thread handler.
97 *
98 *  This routine performs the initialization necessary for this handler.
99 */
100void _Thread_Handler_initialization(void);
101
102/**
103 *  @brief Create idle thread.
104 *
105 *  This routine creates the idle thread.
106 *
107 *  @warning No thread should be created before this one.
108 */
109void _Thread_Create_idle(void);
110
111/**
112 *  @brief Start thread multitasking.
113 *
114 *  This routine initiates multitasking.  It is invoked only as
115 *  part of initialization and its invocation is the last act of
116 *  the non-multitasking part of the system initialization.
117 */
118void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
119
120/**
121 *  @brief Allocate the requested stack space for the thread.
122 *
123 *  Allocate the requested stack space for the thread.
124 *  Set the Start.stack field to the address of the stack.
125 *
126 *  @param[in] the_thread is the thread where the stack space is requested
127 *  @param[in] stack_size is the size of the requested stack space
128 *
129 *  @retval actual size allocated after any adjustment
130 *  @retval zero if the allocation failed
131 */
132size_t _Thread_Stack_Allocate(
133  Thread_Control *the_thread,
134  size_t          stack_size
135);
136
137/**
138 *  @brief Deallocate thread stack.
139 *
140 *  Deallocate the Thread's stack.
141 */
142void _Thread_Stack_Free(
143  Thread_Control *the_thread
144);
145
146/**
147 *  @brief Initialize thread.
148 *
149 *  This routine initializes the specified thread.  It allocates
150 *  all memory associated with this thread.  It completes by adding
151 *  the thread to the local object table so operations on this
152 *  thread id are allowed.
153 *
154 *  @note If stack_area is NULL, it is allocated from the workspace.
155 *
156 *  @note If the stack is allocated from the workspace, then it is
157 *        guaranteed to be of at least minimum size.
158 */
159bool _Thread_Initialize(
160  Thread_Information                   *information,
161  Thread_Control                       *the_thread,
162  const struct Scheduler_Control       *scheduler,
163  void                                 *stack_area,
164  size_t                                stack_size,
165  bool                                  is_fp,
166  Priority_Control                      priority,
167  bool                                  is_preemptible,
168  Thread_CPU_budget_algorithms          budget_algorithm,
169  Thread_CPU_budget_algorithm_callout   budget_callout,
170  uint32_t                              isr_level,
171  Objects_Name                          name
172);
173
174/**
175 *  @brief Initializes the thread start information and makes it ready to execute.
176 *
177 *  This routine initializes the executable information for a thread
178 *  and makes it ready to execute.  After this routine executes, the
179 *  thread competes with all other threads for CPU time.
180 *
181 *  @param the_thread The thread to be started.
182 *  @param entry The thread entry information.
183 */
184bool _Thread_Start(
185  Thread_Control                 *the_thread,
186  const Thread_Entry_information *entry,
187  ISR_lock_Context               *lock_context
188);
189
190void _Thread_Restart_self(
191  Thread_Control                 *executing,
192  const Thread_Entry_information *entry,
193  ISR_lock_Context               *lock_context
194) RTEMS_NO_RETURN;
195
196bool _Thread_Restart_other(
197  Thread_Control                 *the_thread,
198  const Thread_Entry_information *entry,
199  ISR_lock_Context               *lock_context
200);
201
202void _Thread_Yield( Thread_Control *executing );
203
204Thread_Life_state _Thread_Change_life(
205  Thread_Life_state clear,
206  Thread_Life_state set,
207  Thread_Life_state ignore
208);
209
210Thread_Life_state _Thread_Set_life_protection( Thread_Life_state state );
211
212/**
213 * @brief Kills all zombie threads in the system.
214 *
215 * Threads change into the zombie state as the last step in the thread
216 * termination sequence right before a context switch to the heir thread is
217 * initiated.  Since the thread stack is still in use during this phase we have
218 * to postpone the thread stack reclamation until this point.  On SMP
219 * configurations we may have to busy wait for context switch completion here.
220 */
221void _Thread_Kill_zombies( void );
222
223void _Thread_Exit(
224  Thread_Control    *executing,
225  Thread_Life_state  set,
226  void              *exit_value
227);
228
229void _Thread_Join(
230  Thread_Control       *the_thread,
231  States_Control        waiting_for_join,
232  Thread_Control       *executing,
233  Thread_queue_Context *queue_context
234);
235
236void _Thread_Cancel(
237  Thread_Control *the_thread,
238  Thread_Control *executing,
239  void           *exit_value
240);
241
242/**
243 * @brief Closes the thread.
244 *
245 * Closes the thread object and starts the thread termination sequence.  In
246 * case the executing thread is not terminated, then this function waits until
248 * the terminating thread reaches the zombie state.
248 */
249void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
250
251RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
252{
253  return _States_Is_ready( the_thread->current_state );
254}
255
256States_Control _Thread_Clear_state_locked(
257  Thread_Control *the_thread,
258  States_Control  state
259);
260
261/**
262 * @brief Clears the specified thread state.
263 *
264 * In case the previous state is a non-ready state and the next state is the
265 * ready state, then the thread is unblocked by the scheduler.
266 *
267 * @param[in] the_thread The thread.
268 * @param[in] state The state to clear.  It must not be zero.
269 *
270 * @return The previous state.
271 */
272States_Control _Thread_Clear_state(
273  Thread_Control *the_thread,
274  States_Control  state
275);
276
277States_Control _Thread_Set_state_locked(
278  Thread_Control *the_thread,
279  States_Control  state
280);
281
282/**
283 * @brief Sets the specified thread state.
284 *
285 * In case the previous state is the ready state, then the thread is blocked by
286 * the scheduler.
287 *
288 * @param[in] the_thread The thread.
289 * @param[in] state The state to set.  It must not be zero.
290 *
291 * @return The previous state.
292 */
293States_Control _Thread_Set_state(
294  Thread_Control *the_thread,
295  States_Control  state
296);
297
298/**
299 *  @brief Initializes the environment for a thread.
300 *
301 *  This routine initializes the context of @a the_thread to its
302 *  appropriate starting state.
303 *
304 *  @param[in] the_thread is the pointer to the thread control block.
305 */
306void _Thread_Load_environment(
307  Thread_Control *the_thread
308);
309
310void _Thread_Entry_adaptor_idle( Thread_Control *executing );
311
312void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
313
314void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
315
316/**
317 *  @brief Wrapper function for all threads.
318 *
319 *  This routine is the wrapper function for all threads.  It is
320 *  the starting point for all threads.  The user provided thread
321 *  entry point is invoked by this routine.  Operations
322 *  which must be performed immediately before and after the user's
323 *  thread executes are found here.
324 *
325 *  @note On entry, it is assumed all interrupts are blocked and that this
326 *  routine needs to set the initial isr level.  This may or may not
327 *  actually be needed by the context switch routine and as a result
328 *  interrupts may already be at their proper level.  Either way,
329 *  setting the initial isr level properly here is safe.
330 */
331void _Thread_Handler( void );
332
333/**
334 * @brief Executes the global constructors and then restarts itself as the
335 * first initialization thread.
336 *
337 * The first initialization thread is the first RTEMS initialization task or
338 * the first POSIX initialization thread in case no RTEMS initialization tasks
339 * are present.
340 */
341void _Thread_Global_construction(
342  Thread_Control                 *executing,
343  const Thread_Entry_information *entry
344) RTEMS_NO_RETURN;
345
346/**
347 *  @brief Ends the delay of a thread.
348 *
349 *  This routine is invoked when a thread must be unblocked at the
350 *  end of a time based delay (i.e. wake after or wake when).
351 *  It is called by the watchdog handler.
352 *
353 *  @param[in] id is the thread id
354 *  @param[in] ignored is not used
355 */
356void _Thread_Delay_ended(
357  Objects_Id  id,
358  void       *ignored
359);
360
361RTEMS_INLINE_ROUTINE void _Thread_State_acquire_critical(
362  Thread_Control   *the_thread,
363  ISR_lock_Context *lock_context
364)
365{
366  _Thread_queue_Do_acquire_critical( &the_thread->Join_queue, lock_context );
367}
368
369RTEMS_INLINE_ROUTINE void _Thread_State_acquire(
370  Thread_Control   *the_thread,
371  ISR_lock_Context *lock_context
372)
373{
374  _ISR_lock_ISR_disable( lock_context );
375  _Thread_State_acquire_critical( the_thread, lock_context );
376}
377
378RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
379  ISR_lock_Context *lock_context
380)
381{
382  Thread_Control *executing;
383
384  _ISR_lock_ISR_disable( lock_context );
385  executing = _Thread_Executing;
386  _Thread_State_acquire_critical( executing, lock_context );
387
388  return executing;
389}
390
391RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
392  Thread_Control   *the_thread,
393  ISR_lock_Context *lock_context
394)
395{
396  _Thread_queue_Do_release_critical( &the_thread->Join_queue, lock_context );
397}
398
399RTEMS_INLINE_ROUTINE void _Thread_State_release(
400  Thread_Control   *the_thread,
401  ISR_lock_Context *lock_context
402)
403{
404  _Thread_State_release_critical( the_thread, lock_context );
405  _ISR_lock_ISR_enable( lock_context );
406}
407
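/*
 * Illustrative sketch, not part of the original header: the usual bracket
 * around a consistent read of another thread's state using the thread state
 * lock defined above.  The function name is hypothetical.
 */
RTEMS_INLINE_ROUTINE States_Control _Example_Read_current_state(
  Thread_Control *the_thread
)
{
  ISR_lock_Context lock_context;
  States_Control   state;

  _Thread_State_acquire( the_thread, &lock_context );
  state = the_thread->current_state;
  _Thread_State_release( the_thread, &lock_context );

  return state;
}
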
408#if defined(RTEMS_DEBUG)
409RTEMS_INLINE_ROUTINE bool _Thread_State_is_owner(
410  const Thread_Control *the_thread
411)
412{
413  return _Thread_queue_Is_lock_owner( &the_thread->Join_queue );
414}
415#endif
416
417/**
418 * @brief Returns true if the left thread priority is less than the right
419 * thread priority in the intuitive sense of priority and false otherwise.
420 */
421RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
422  Priority_Control left,
423  Priority_Control right
424)
425{
426  return left > right;
427}
428
429/**
430 * @brief Returns the highest priority of the left and right thread priorities
431 * in the intuitive sense of priority.
432 */
433RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
434  Priority_Control left,
435  Priority_Control right
436)
437{
438  return _Thread_Priority_less_than( left, right ) ? right : left;
439}
440
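/*
 * Illustrative sketch, not part of the original header: with the RTEMS
 * convention that numerically smaller values denote more important
 * priorities, a priority value of 5 is "higher" than 10, so
 * _Thread_Priority_less_than( 10, 5 ) is true and
 * _Thread_Priority_highest( 10, 5 ) yields 5.  The helper below is
 * hypothetical and exists only to state this concretely.
 */
RTEMS_INLINE_ROUTINE bool _Example_Priority_convention_holds( void )
{
  return _Thread_Priority_less_than( 10, 5 )
    && _Thread_Priority_highest( 10, 5 ) == 5;
}
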
441/**
442 * @brief Filters a thread priority change.
443 *
444 * Called by _Thread_Change_priority() under the protection of the thread lock.
445 *
446 * @param[in] the_thread The thread.
447 * @param[in, out] new_priority The new priority of the thread.  The filter may
448 * alter this value.
449 * @param[in] arg The argument passed to _Thread_Change_priority().
450 *
451 * @retval true Change the current priority.
452 * @retval false Otherwise.
453 */
454typedef bool ( *Thread_Change_priority_filter )(
455  Thread_Control   *the_thread,
456  Priority_Control *new_priority,
457  void             *arg
458);
459
460Thread_Control *_Thread_Apply_priority(
461  Thread_Control                *the_thread,
462  Priority_Control               new_priority,
463  void                          *arg,
464  Thread_Change_priority_filter  filter,
465  bool                           prepend_it
466);
467
468void _Thread_Update_priority( Thread_Control *the_thread );
469
470/**
471 * @brief Changes the priority of a thread if allowed by the filter function.
472 *
473 * It changes the current priority of the thread to the new priority in case the
474 * filter function returns true.  In this case the scheduler is notified of the
475 * priority change as well.
476 *
477 * @param[in] the_thread The thread.
478 * @param[in] new_priority The new priority of the thread.
479 * @param[in] arg The argument for the filter function.
480 * @param[in] filter The filter function to determine if a priority change is
481 * allowed and optionally perform other actions under the protection of the
482 * thread lock simultaneously with the update of the current priority.
483 * @param[in] prepend_it In case this is true, then the thread is prepended to
484 * its priority group in its scheduler instance, otherwise it is appended.
485 */
486void _Thread_Change_priority(
487  Thread_Control                *the_thread,
488  Priority_Control               new_priority,
489  void                          *arg,
490  Thread_Change_priority_filter  filter,
491  bool                           prepend_it
492);
493
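/*
 * Illustrative sketch, not part of the original header: a filter for
 * _Thread_Change_priority() which never lets the priority drop below a
 * ceiling passed via the argument pointer.  The names are hypothetical; the
 * filter runs under the protection of the thread lock.
 */
RTEMS_INLINE_ROUTINE bool _Example_Ceiling_filter(
  Thread_Control   *the_thread,
  Priority_Control *new_priority,
  void             *arg
)
{
  const Priority_Control *ceiling = arg;

  (void) the_thread;

  /* Keep the priority at least as high as the ceiling (intuitive sense) */
  *new_priority = _Thread_Priority_highest( *new_priority, *ceiling );

  return true;
}

/*
 * A caller would pass it along, e.g.
 * _Thread_Change_priority( the_thread, prio, &ceiling, _Example_Ceiling_filter, false );
 */
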
494/**
495 * @brief Raises the priority of a thread.
496 *
497 * It changes the current priority of the thread to the new priority if the new
498 * priority is higher than the current priority.  In this case the thread is
499 * appended to its new priority group in its scheduler instance.
500 *
501 * @param[in] the_thread The thread.
502 * @param[in] new_priority The new priority of the thread.
503 *
504 * @see _Thread_Change_priority().
505 */
506void _Thread_Raise_priority(
507  Thread_Control   *the_thread,
508  Priority_Control  new_priority
509);
510
511/**
512 * @brief Sets the current priority of a thread to its real priority.
513 *
514 * Sets the priority restore hint to false.
515 */
516void _Thread_Restore_priority( Thread_Control *the_thread );
517
518/**
519 * @brief Sets the priority of a thread.
520 *
521 * It sets the real priority of the thread.  In addition it changes the current
522 * priority of the thread if the new priority is higher than the current
523 * priority or the thread owns no resources.
524 *
525 * @param[in] the_thread The thread.
526 * @param[in] new_priority The new priority of the thread.
527 * @param[out] old_priority The old real priority of the thread.  This pointer
528 * must not be @c NULL.
529 * @param[in] prepend_it In case this is true, then the thread is prepended to
530 * its priority group in its scheduler instance, otherwise it is appended.
531 *
532 * @see _Thread_Change_priority().
533 */
534void _Thread_Set_priority(
535  Thread_Control   *the_thread,
536  Priority_Control  new_priority,
537  Priority_Control *old_priority,
538  bool              prepend_it
539);
540
541RTEMS_INLINE_ROUTINE Objects_Information *_Thread_Get_objects_information(
542  Objects_Id id
543)
544{
545  uint32_t the_api;
546
547  the_api = _Objects_Get_API( id );
548
549  if ( !_Objects_Is_api_valid( the_api ) ) {
550    return NULL;
551  }
552
553  /*
554   * Threads are always first class :)
555   *
556   * There is no need to validate the object class of the object identifier,
557   * since this will be done by the object get methods.
558   */
559  return _Objects_Information_table[ the_api ][ 1 ];
560}
561
562/**
563 * @brief Gets a thread by its identifier.
564 *
565 * @see _Objects_Get().
566 */
567Thread_Control *_Thread_Get(
568  Objects_Id         id,
569  ISR_lock_Context  *lock_context
570);
571
572RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
573  const Thread_Control *thread
574)
575{
576#if defined(RTEMS_SMP)
577  return thread->Scheduler.cpu;
578#else
579  (void) thread;
580
581  return _Per_CPU_Get();
582#endif
583}
584
585RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
586  Thread_Control *thread,
587  Per_CPU_Control *cpu
588)
589{
590#if defined(RTEMS_SMP)
591  thread->Scheduler.cpu = cpu;
592#else
593  (void) thread;
594  (void) cpu;
595#endif
596}
597
598/**
599 * This function returns true if the_thread is the currently executing
600 * thread, and false otherwise.
601 */
602
603RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
604  const Thread_Control *the_thread
605)
606{
607  return ( the_thread == _Thread_Executing );
608}
609
610#if defined(RTEMS_SMP)
611/**
612 * @brief Returns @a true in case the thread currently executes on some
613 * processor in the system, otherwise @a false.
614 *
615 * Do not confuse this with _Thread_Is_executing() which checks only the
616 * current processor.
617 */
618RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
619  const Thread_Control *the_thread
620)
621{
622  return _CPU_Context_Get_is_executing( &the_thread->Registers );
623}
624#endif
625
626/**
627 * This function returns true if the_thread is the heir
628 * thread, and false otherwise.
629 */
630
631RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
632  const Thread_Control *the_thread
633)
634{
635  return ( the_thread == _Thread_Heir );
636}
637
638/**
639 * This routine clears any blocking state for the_thread.  It performs
640 * any necessary scheduling operations including the selection of
641 * a new heir thread.
642 */
643
644RTEMS_INLINE_ROUTINE void _Thread_Unblock (
645  Thread_Control *the_thread
646)
647{
648  _Thread_Clear_state( the_thread, STATES_BLOCKED );
649}
650
651/**
652 * This function returns true if the floating point context of
653 * the_thread is currently loaded in the floating point unit, and
654 * false otherwise.
655 */
656
657#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
658RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
659  const Thread_Control *the_thread
660)
661{
662  return ( the_thread == _Thread_Allocated_fp );
663}
664#endif
665
666/*
667 *  If the CPU has hardware floating point, then we must address saving
668 *  and restoring it as part of the context switch.
669 *
670 *  The second conditional compilation section selects the algorithm used
671 *  to context switch between floating point tasks.  The deferred algorithm
672 *  can be significantly better in a system with few floating point tasks
673 *  because it reduces the total number of save and restore FP context
674 *  operations.  However, this algorithm can not be used on all CPUs due
675 *  to unpredictable use of FP registers by some compilers for integer
676 *  operations.
677 */
678
679RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
680{
681#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
682#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
683  if ( executing->fp_context != NULL )
684    _Context_Save_fp( &executing->fp_context );
685#endif
686#endif
687}
688
689RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
690{
691#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
692#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
693  if ( (executing->fp_context != NULL) &&
694       !_Thread_Is_allocated_fp( executing ) ) {
695    if ( _Thread_Allocated_fp != NULL )
696      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
697    _Context_Restore_fp( &executing->fp_context );
698    _Thread_Allocated_fp = executing;
699  }
700#else
701  if ( executing->fp_context != NULL )
702    _Context_Restore_fp( &executing->fp_context );
703#endif
704#endif
705}
706
707/**
708 * This routine is invoked when the currently loaded floating
709 * point context is no longer associated with an active thread.
710 */
711
712#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
713RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
714{
715  _Thread_Allocated_fp = NULL;
716}
717#endif
718
719/**
720 * This function returns true if a thread dispatch is necessary, and false
721 * otherwise.
722 */
723
724RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
725{
726  return ( _Thread_Dispatch_necessary );
727}
728
729/**
730 * This function returns true if the_thread is NULL and false otherwise.
731 */
732
733RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
734  const Thread_Control *the_thread
735)
736{
737  return ( the_thread == NULL );
738}
739
740/**
741 * @brief Is proxy blocking.
742 *
743 * Returns true if the code indicates that a proxy is blocking, and false otherwise.
744 */
745RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
746  uint32_t   code
747)
748{
749  return (code == THREAD_STATUS_PROXY_BLOCKING);
750}
751
752RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
753{
754  /* Idle threads */
755  uint32_t maximum_internal_threads =
756    rtems_configuration_get_maximum_processors();
757
758  /* MPCI thread */
759#if defined(RTEMS_MULTIPROCESSING)
760  if ( _System_state_Is_multiprocessing ) {
761    ++maximum_internal_threads;
762  }
763#endif
764
765  return maximum_internal_threads;
766}
767
768RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
769{
770  return (Thread_Control *)
771    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
772}
773
774/**
775 * @brief Gets the heir of the processor and makes it executing.
776 *
777 * Must be called with interrupts disabled.  The thread dispatch necessary
778 * indicator is cleared as a side-effect.
779 *
780 * @return The heir thread.
781 *
782 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
783 * _Thread_Dispatch_update_heir().
784 */
785RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
786  Per_CPU_Control *cpu_self
787)
788{
789  Thread_Control *heir;
790
791  heir = cpu_self->heir;
792  cpu_self->dispatch_necessary = false;
793  cpu_self->executing = heir;
794
795  return heir;
796}
797
798RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
799  Thread_Control  *the_thread,
800  Per_CPU_Control *cpu
801)
802{
803  Timestamp_Control last;
804  Timestamp_Control ran;
805
806  last = cpu->cpu_usage_timestamp;
807  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
808  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
809  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
810}
811
812#if defined( RTEMS_SMP )
813RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
814  Per_CPU_Control *cpu_self,
815  Per_CPU_Control *cpu_for_heir,
816  Thread_Control  *heir
817)
818{
819  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );
820
821  cpu_for_heir->heir = heir;
822
823  _Thread_Dispatch_request( cpu_self, cpu_for_heir );
824}
825#endif
826
827void _Thread_Get_CPU_time_used(
828  Thread_Control    *the_thread,
829  Timestamp_Control *cpu_time_used
830);
831
832RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
833  Thread_Action_control *action_control
834)
835{
836  _Chain_Initialize_empty( &action_control->Chain );
837}
838
839RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
840  Thread_Action *action
841)
842{
843  _Chain_Set_off_chain( &action->Node );
844}
845
846RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
847  Thread_Control        *the_thread,
848  Thread_Action         *action,
849  Thread_Action_handler  handler
850)
851{
852  Per_CPU_Control *cpu_of_thread;
853
854  _Assert( _Thread_State_is_owner( the_thread ) );
855
856  cpu_of_thread = _Thread_Get_CPU( the_thread );
857
858  action->handler = handler;
859
860  _Thread_Dispatch_request( _Per_CPU_Get(), cpu_of_thread );
861
862  _Chain_Append_if_is_off_chain_unprotected(
863    &the_thread->Post_switch_actions.Chain,
864    &action->Node
865  );
866}
867
868RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
869  Thread_Life_state life_state
870)
871{
872  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
873}
874
875RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
876  Thread_Life_state life_state
877)
878{
879  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
880}
881
882RTEMS_INLINE_ROUTINE bool _Thread_Is_life_change_allowed(
883  Thread_Life_state life_state
884)
885{
886  return ( life_state
887    & ( THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED ) ) == 0;
888}
889
890RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
891  Thread_Life_state life_state
892)
893{
894  return ( life_state
895    & ( THREAD_LIFE_RESTARTING | THREAD_LIFE_TERMINATING ) ) != 0;
896}
897
898RTEMS_INLINE_ROUTINE bool _Thread_Is_joinable(
899  const Thread_Control *the_thread
900)
901{
902  _Assert( _Thread_State_is_owner( the_thread ) );
903  return ( the_thread->Life.state & THREAD_LIFE_DETACHED ) == 0;
904}
905
906/**
907 * @brief Returns true if the thread owns resources, and false otherwise.
908 *
909 * Resources are accounted with the Thread_Control::resource_count resource
910 * counter.  This counter is used by semaphore objects for example.
911 *
912 * In addition to the resource counter there is a resource dependency tree
913 * available on SMP configurations.  In case this tree is non-empty, then the
914 * thread owns resources.
915 *
916 * @param[in] the_thread The thread.
917 */
918RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
919  const Thread_Control *the_thread
920)
921{
922  bool owns_resources = the_thread->resource_count != 0;
923
924#if defined(RTEMS_SMP)
925  owns_resources = owns_resources
926    || _Resource_Node_owns_resources( &the_thread->Resource_node );
927#endif
928
929  return owns_resources;
930}
931
932/**
933 * @brief Returns the priority of the thread.
934 *
935 * Returns the thread priority that is relevant for the user API and the thread wait information.
936 * This includes temporary thread priority adjustments due to locking
937 * protocols, a job release or the POSIX sporadic server for example.
938 *
939 * @return The priority of the thread.
940 *
941 * @see _Scheduler_Node_get_priority().
942 */
943RTEMS_INLINE_ROUTINE Priority_Control _Thread_Get_priority(
944  const Thread_Control *the_thread
945)
946{
947  return the_thread->current_priority;
948}
949
950/**
951 * @brief Acquires the thread wait default lock inside a critical section
952 * (interrupts disabled).
953 *
954 * @param[in] the_thread The thread.
955 * @param[in] lock_context The lock context used for the corresponding lock
956 *   release.
957 *
958 * @see _Thread_Wait_release_default_critical().
959 */
960RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default_critical(
961  Thread_Control   *the_thread,
962  ISR_lock_Context *lock_context
963)
964{
965  _ISR_lock_Acquire( &the_thread->Wait.Lock.Default, lock_context );
966}
967
968/**
969 * @brief Acquires the thread wait default lock and returns the executing
970 * thread.
971 *
972 * @param[in] lock_context The lock context used for the corresponding lock
973 *   release.
974 *
975 * @return The executing thread.
976 *
977 * @see _Thread_Wait_release_default().
978 */
979RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Wait_acquire_default_for_executing(
980  ISR_lock_Context *lock_context
981)
982{
983  Thread_Control *executing;
984
985  _ISR_lock_ISR_disable( lock_context );
986  executing = _Thread_Executing;
987  _Thread_Wait_acquire_default_critical( executing, lock_context );
988
989  return executing;
990}
991
992/**
993 * @brief Acquires the thread wait default lock and disables interrupts.
994 *
995 * @param[in] the_thread The thread.
996 * @param[in] lock_context The lock context used for the corresponding lock
997 *   release.
998 *
999 * @see _Thread_Wait_release_default().
1000 */
1001RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_default(
1002  Thread_Control   *the_thread,
1003  ISR_lock_Context *lock_context
1004)
1005{
1006  _ISR_lock_ISR_disable( lock_context );
1007  _Thread_Wait_acquire_default_critical( the_thread, lock_context );
1008}
1009
1010/**
1011 * @brief Releases the thread wait default lock inside a critical section
1012 * (interrupts disabled).
1013 *
1014 * The previous interrupt status is not restored.
1015 *
1016 * @param[in] the_thread The thread.
1017 * @param[in] lock_context The lock context used for the corresponding lock
1018 *   acquire.
1019 */
1020RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default_critical(
1021  Thread_Control   *the_thread,
1022  ISR_lock_Context *lock_context
1023)
1024{
1025  _ISR_lock_Release( &the_thread->Wait.Lock.Default, lock_context );
1026}
1027
1028/**
1029 * @brief Releases the thread wait default lock and restores the previous
1030 * interrupt status.
1031 *
1032 * @param[in] the_thread The thread.
1033 * @param[in] lock_context The lock context used for the corresponding lock
1034 *   acquire.
1035 */
1036RTEMS_INLINE_ROUTINE void _Thread_Wait_release_default(
1037  Thread_Control   *the_thread,
1038  ISR_lock_Context *lock_context
1039)
1040{
1041  _Thread_Wait_release_default_critical( the_thread, lock_context );
1042  _ISR_lock_ISR_enable( lock_context );
1043}
1044
1045#if defined(RTEMS_SMP)
1046#define THREAD_QUEUE_CONTEXT_OF_REQUEST( node ) \
1047  RTEMS_CONTAINER_OF( node, Thread_queue_Context, Lock_context.Wait.Gate.Node )
1048
1049RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request_locked(
1050  Thread_Control            *the_thread,
1051  Thread_queue_Lock_context *queue_lock_context
1052)
1053{
1054  Chain_Node *first;
1055
1056  _Chain_Extract_unprotected( &queue_lock_context->Wait.Gate.Node );
1057  first = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1058
1059  if ( first != _Chain_Tail( &the_thread->Wait.Lock.Pending_requests ) ) {
1060    _Thread_queue_Gate_open( (Thread_queue_Gate *) first );
1061  }
1062}
1063
1064RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_queue_critical(
1065  Thread_queue_Queue        *queue,
1066  Thread_queue_Lock_context *queue_lock_context
1067)
1068{
1069  _Thread_queue_Queue_acquire_critical(
1070    queue,
1071    &_Thread_Executing->Potpourri_stats,
1072    &queue_lock_context->Lock_context
1073  );
1074}
1075
1076RTEMS_INLINE_ROUTINE void _Thread_Wait_release_queue_critical(
1077  Thread_queue_Queue        *queue,
1078  Thread_queue_Lock_context *queue_lock_context
1079)
1080{
1081  _Thread_queue_Queue_release_critical(
1082    queue,
1083    &queue_lock_context->Lock_context
1084  );
1085}
1086#endif
1087
1088/**
1089 * @brief Acquires the thread wait lock inside a critical section (interrupts
1090 * disabled).
1091 *
1092 * @param[in] the_thread The thread.
1093 * @param[in] queue_context The thread queue context for the corresponding
1094 *   _Thread_Wait_release_critical().
1095 *
1096 * @see _Thread_queue_Context_initialize().
1097 */
1098RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire_critical(
1099  Thread_Control       *the_thread,
1100  Thread_queue_Context *queue_context
1101)
1102{
1103#if defined(RTEMS_SMP)
1104  Thread_queue_Queue *queue;
1105
1106  _Thread_Wait_acquire_default_critical(
1107    the_thread,
1108    &queue_context->Lock_context.Lock_context
1109  );
1110
1111  queue = the_thread->Wait.queue;
1112  queue_context->Lock_context.Wait.queue = queue;
1113
1114  if ( queue != NULL ) {
1115    _Thread_queue_Gate_add(
1116      &the_thread->Wait.Lock.Pending_requests,
1117      &queue_context->Lock_context.Wait.Gate
1118    );
1119    _Thread_Wait_release_default_critical(
1120      the_thread,
1121      &queue_context->Lock_context.Lock_context
1122    );
1123    _Thread_Wait_acquire_queue_critical( queue, &queue_context->Lock_context );
1124
1125    if ( queue_context->Lock_context.Wait.queue == NULL ) {
1126      _Thread_Wait_release_queue_critical(
1127        queue,
1128        &queue_context->Lock_context
1129      );
1130      _Thread_Wait_acquire_default_critical(
1131        the_thread,
1132        &queue_context->Lock_context.Lock_context
1133      );
1134      _Thread_Wait_remove_request_locked(
1135        the_thread,
1136        &queue_context->Lock_context
1137      );
1138      _Assert( the_thread->Wait.queue == NULL );
1139    }
1140  }
1141#else
1142  (void) the_thread;
1143  (void) queue_context;
1144#endif
1145}
1146
1147/**
1148 * @brief Acquires the thread wait lock and disables interrupts.
1149 *
1150 * @param[in] the_thread The thread.
1151 * @param[in] queue_context The thread queue context for the corresponding
1152 *   _Thread_Wait_release().
1153 */
1154RTEMS_INLINE_ROUTINE void _Thread_Wait_acquire(
1155  Thread_Control       *the_thread,
1156  Thread_queue_Context *queue_context
1157)
1158{
1159  _Thread_queue_Context_initialize( queue_context );
1160  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
1161  _Thread_Wait_acquire_critical( the_thread, queue_context );
1162}
1163
1164/**
1165 * @brief Releases the thread wait lock inside a critical section (interrupts
1166 * disabled).
1167 *
1168 * The previous interrupt status is not restored.
1169 *
1170 * @param[in] the_thread The thread.
1171 * @param[in] queue_context The thread queue context used for corresponding
1172 *   _Thread_Wait_acquire_critical().
1173 */
1174RTEMS_INLINE_ROUTINE void _Thread_Wait_release_critical(
1175  Thread_Control       *the_thread,
1176  Thread_queue_Context *queue_context
1177)
1178{
1179#if defined(RTEMS_SMP)
1180  Thread_queue_Queue *queue;
1181
1182  queue = queue_context->Lock_context.Wait.queue;
1183
1184  if ( queue != NULL ) {
1185    _Thread_Wait_release_queue_critical(
1186      queue, &queue_context->Lock_context
1187    );
1188    _Thread_Wait_acquire_default_critical(
1189      the_thread,
1190      &queue_context->Lock_context.Lock_context
1191    );
1192    _Thread_Wait_remove_request_locked(
1193      the_thread,
1194      &queue_context->Lock_context
1195    );
1196  }
1197
1198  _Thread_Wait_release_default_critical(
1199    the_thread,
1200    &queue_context->Lock_context.Lock_context
1201  );
1202#else
1203  (void) the_thread;
1204  (void) queue_context;
1205#endif
1206}
1207
1208/**
1209 * @brief Releases the thread wait lock and restores the previous interrupt
1210 * status.
1211 *
1212 * @param[in] the_thread The thread.
1213 * @param[in] queue_context The thread queue context used for corresponding
1214 *   _Thread_Wait_acquire().
1215 */
1216RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
1217  Thread_Control       *the_thread,
1218  Thread_queue_Context *queue_context
1219)
1220{
1221  _Thread_Wait_release_critical( the_thread, queue_context );
1222  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
1223}
1224
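/*
 * Illustrative sketch, not part of the original header: bracketing an
 * inspection of the thread wait state with the thread wait lock acquire and
 * release defined above.  The function name is hypothetical and the
 * queue_context is owned by the caller.
 */
RTEMS_INLINE_ROUTINE Thread_queue_Queue *_Example_Peek_wait_queue(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Queue *queue;

  _Thread_Wait_acquire( the_thread, queue_context );
  queue = the_thread->Wait.queue;
  _Thread_Wait_release( the_thread, queue_context );

  return queue;
}
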
1225/**
1226 * @brief Claims the thread wait queue and operations.
1227 *
1228 * The caller must not be the owner of the default thread wait lock.  The
1229 * caller must be the owner of the corresponding thread queue lock.
1230 *
1231 * @param[in] the_thread The thread.
1232 * @param[in] queue The new thread queue.
1233 * @param[in] operations The new thread operations.
1234 *
1235 * @see _Thread_Wait_restore_default().
1236 */
1237RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
1238  Thread_Control                *the_thread,
1239  Thread_queue_Queue            *queue,
1240  const Thread_queue_Operations *operations
1241)
1242{
1243  ISR_lock_Context lock_context;
1244
1245  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1246
1247  _Assert( the_thread->Wait.queue == NULL );
1248
1249#if defined(RTEMS_SMP)
1250  _Chain_Initialize_empty( &the_thread->Wait.Lock.Pending_requests );
1251  _Chain_Initialize_node( &the_thread->Wait.Lock.Tranquilizer.Node );
1252  _Thread_queue_Gate_close( &the_thread->Wait.Lock.Tranquilizer );
1253#endif
1254
1255  the_thread->Wait.queue = queue;
1256  the_thread->Wait.operations = operations;
1257
1258  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1259}
1260
1261/**
1262 * @brief Removes a thread wait lock request.
1263 *
1264 * On SMP configurations, removes a thread wait lock request.
1265 *
1266 * On other configurations, this function does nothing.
1267 *
1268 * @param[in] the_thread The thread.
1269 * @param[in] queue_lock_context The thread queue lock context used for
1270 *   corresponding _Thread_Wait_acquire().
1271 */
1272RTEMS_INLINE_ROUTINE void _Thread_Wait_remove_request(
1273  Thread_Control            *the_thread,
1274  Thread_queue_Lock_context *queue_lock_context
1275)
1276{
1277#if defined(RTEMS_SMP)
1278  ISR_lock_Context lock_context;
1279
1280  _Thread_Wait_acquire_default( the_thread, &lock_context );
1281  _Thread_Wait_remove_request_locked( the_thread, queue_lock_context );
1282  _Thread_Wait_release_default( the_thread, &lock_context );
1283#else
1284  (void) the_thread;
1285  (void) queue_lock_context;
1286#endif
1287}
1288
1289/**
1290 * @brief Restores the default thread wait queue and operations.
1291 *
1292 * The caller must be the owner of the current thread wait queue lock.
1293 *
1294 * On SMP configurations, the pending requests are updated to use the stale
1295 * thread queue operations.
1296 *
1297 * @param[in] the_thread The thread.
1298 *
1299 * @see _Thread_Wait_claim().
1300 */
1301RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default(
1302  Thread_Control *the_thread
1303)
1304{
1305#if defined(RTEMS_SMP)
1306  ISR_lock_Context  lock_context;
1307  Chain_Node       *node;
1308  const Chain_Node *tail;
1309
1310  _Thread_Wait_acquire_default_critical( the_thread, &lock_context );
1311
1312  node = _Chain_First( &the_thread->Wait.Lock.Pending_requests );
1313  tail = _Chain_Immutable_tail( &the_thread->Wait.Lock.Pending_requests );
1314
1315  if ( node != tail ) {
1316    do {
1317      Thread_queue_Context *queue_context;
1318
1319      queue_context = THREAD_QUEUE_CONTEXT_OF_REQUEST( node );
1320      queue_context->Lock_context.Wait.queue = NULL;
1321
1322      node = _Chain_Next( node );
1323    } while ( node != tail );
1324
1325    _Thread_queue_Gate_add(
1326      &the_thread->Wait.Lock.Pending_requests,
1327      &the_thread->Wait.Lock.Tranquilizer
1328    );
1329  } else {
1330    _Thread_queue_Gate_open( &the_thread->Wait.Lock.Tranquilizer );
1331  }
1332#endif
1333
1334  the_thread->Wait.queue = NULL;
1335  the_thread->Wait.operations = &_Thread_queue_Operations_default;
1336
1337#if defined(RTEMS_SMP)
1338  _Thread_Wait_release_default_critical( the_thread, &lock_context );
1339#endif
1340}
1341
1342/**
1343 * @brief Tranquilizes the thread after a wait on a thread queue.
1344 *
1345 * After the violent blocking procedure this function makes the thread calm and
1346 * peaceful again so that it can carry out its normal work.
1347 *
1348 * On SMP configurations, ensures that all pending thread wait lock requests
1349 * have completed before the thread is able to begin a new thread wait procedure.
1350 *
1351 * On other configurations, this function does nothing.
1352 *
1353 * It must be called after a _Thread_Wait_claim() exactly once
1354 *  - after the corresponding thread queue lock was released, and
1355 *  - the default wait state is restored or some other processor is about to do
1356 *    this.
1357 *
1358 * @param[in] the_thread The thread.
1359 */
1360RTEMS_INLINE_ROUTINE void _Thread_Wait_tranquilize(
1361  Thread_Control *the_thread
1362)
1363{
1364#if defined(RTEMS_SMP)
1365  _Thread_queue_Gate_wait( &the_thread->Wait.Lock.Tranquilizer );
1366#else
1367  (void) the_thread;
1368#endif
1369}
1370
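/*
 * Illustrative sketch, not part of the original header: the order of the wait
 * claim, restore and tranquilize operations around a wait on a thread queue.
 * In the real implementation these steps are distributed over the enqueue,
 * extract and wake-up paths; the helper below is hypothetical and ignores the
 * actual queueing and time-out handling.
 */
RTEMS_INLINE_ROUTINE void _Example_Wait_on_queue(
  Thread_Control                *executing,
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations
)
{
  /* Holding the thread queue lock (assumption of this sketch) */
  _Thread_Wait_claim( executing, queue, operations );

  /*
   * ... enqueue, release the thread queue lock, block ...
   *
   * Another context extracts the thread, calls
   * _Thread_Wait_restore_default() under the thread queue lock and unblocks
   * the thread.
   */

  /* Back in the woken thread, after the thread queue lock was released */
  _Thread_Wait_tranquilize( executing );
}
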
1371/**
1372 * @brief Cancels a thread wait on a thread queue.
1373 *
1374 * @param[in] the_thread The thread.
1375 * @param[in] queue_context The thread queue context used for corresponding
1376 *   _Thread_Wait_acquire().
1377 */
1378RTEMS_INLINE_ROUTINE void _Thread_Wait_cancel(
1379  Thread_Control       *the_thread,
1380  Thread_queue_Context *queue_context
1381)
1382{
1383  Thread_queue_Queue *queue;
1384
1385  queue = the_thread->Wait.queue;
1386
1387#if defined(RTEMS_SMP)
1388  if ( queue != NULL ) {
1389    _Assert( queue_context->Lock_context.Wait.queue == queue );
1390#endif
1391
1392    ( *the_thread->Wait.operations->extract )( queue, the_thread );
1393    _Thread_Wait_restore_default( the_thread );
1394
1395#if defined(RTEMS_SMP)
1396    _Assert( queue_context->Lock_context.Wait.queue == NULL );
1397    queue_context->Lock_context.Wait.queue = queue;
1398  }
1399#endif
1400}
1401
1402/**
1403 * @brief The initial thread wait flags value set by _Thread_Initialize().
1404 */
1405#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1406
1407/**
1408 * @brief Mask to get the thread wait state flags.
1409 */
1410#define THREAD_WAIT_STATE_MASK 0xffU
1411
1412/**
1413 * @brief Indicates that the thread begins with the blocking operation.
1414 *
1415 * A blocking operation consists of an optional watchdog initialization and the
1416 * setting of the appropriate thread blocking state with the corresponding
1417 * scheduler block operation.
1418 */
1419#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1420
1421/**
1422 * @brief Indicates that the thread completed the blocking operation.
1423 */
1424#define THREAD_WAIT_STATE_BLOCKED 0x2U
1425
1426/**
1427 * @brief Indicates that a condition to end the thread wait occurred.
1428 *
1429 * This could be a timeout, a signal, an event, or a resource becoming available.
1430 */
1431#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1432
1433/**
1434 * @brief Mask to get the thread wait class flags.
1435 */
1436#define THREAD_WAIT_CLASS_MASK 0xff00U
1437
1438/**
1439 * @brief Indicates that the thread waits for an event.
1440 */
1441#define THREAD_WAIT_CLASS_EVENT 0x100U
1442
1443/**
1444 * @brief Indicates that the thread waits for a system event.
1445 */
1446#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1447
1448/**
1449 * @brief Indicates that the thread waits for an object.
1450 */
1451#define THREAD_WAIT_CLASS_OBJECT 0x400U
1452
1453/**
1454 * @brief Indicates that the thread waits for a period.
1455 */
1456#define THREAD_WAIT_CLASS_PERIOD 0x800U
1457
1458RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
1459  Thread_Control    *the_thread,
1460  Thread_Wait_flags  flags
1461)
1462{
1463#if defined(RTEMS_SMP)
1464  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
1465#else
1466  the_thread->Wait.flags = flags;
1467#endif
1468}
1469
1470RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
1471  const Thread_Control *the_thread
1472)
1473{
1474#if defined(RTEMS_SMP)
1475  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
1476#else
1477  return the_thread->Wait.flags;
1478#endif
1479}
1480
1481/**
1482 * @brief Tries to change the thread wait flags with release semantics in case
1483 * of success.
1484 *
1485 * Must be called inside a critical section (interrupts disabled).
1486 *
1487 * In case the wait flags are equal to the expected wait flags, then the wait
1488 * flags are set to the desired wait flags.
1489 *
1490 * @param[in] the_thread The thread.
1491 * @param[in] expected_flags The expected wait flags.
1492 * @param[in] desired_flags The desired wait flags.
1493 *
1494 * @retval true The wait flags were equal to the expected wait flags.
1495 * @retval false Otherwise.
1496 */
1497RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_release(
1498  Thread_Control    *the_thread,
1499  Thread_Wait_flags  expected_flags,
1500  Thread_Wait_flags  desired_flags
1501)
1502{
1503  _Assert( _ISR_Get_level() != 0 );
1504
1505#if defined(RTEMS_SMP)
1506  return _Atomic_Compare_exchange_uint(
1507    &the_thread->Wait.flags,
1508    &expected_flags,
1509    desired_flags,
1510    ATOMIC_ORDER_RELEASE,
1511    ATOMIC_ORDER_RELAXED
1512  );
1513#else
1514  bool success = ( the_thread->Wait.flags == expected_flags );
1515
1516  if ( success ) {
1517    the_thread->Wait.flags = desired_flags;
1518  }
1519
1520  return success;
1521#endif
1522}
1523
1524/**
1525 * @brief Tries to change the thread wait flags with acquire semantics.
1526 *
1527 * In case the wait flags are equal to the expected wait flags, then the wait
1528 * flags are set to the desired wait flags.
1529 *
1530 * @param[in] the_thread The thread.
1531 * @param[in] expected_flags The expected wait flags.
1532 * @param[in] desired_flags The desired wait flags.
1533 *
1534 * @retval true The wait flags were equal to the expected wait flags.
1535 * @retval false Otherwise.
1536 */
1537RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_acquire(
1538  Thread_Control    *the_thread,
1539  Thread_Wait_flags  expected_flags,
1540  Thread_Wait_flags  desired_flags
1541)
1542{
1543  bool success;
1544#if defined(RTEMS_SMP)
1545  return _Atomic_Compare_exchange_uint(
1546    &the_thread->Wait.flags,
1547    &expected_flags,
1548    desired_flags,
1549    ATOMIC_ORDER_ACQUIRE,
1550    ATOMIC_ORDER_ACQUIRE
1551  );
1552#else
1553  ISR_Level level;
1554
1555  _ISR_Local_disable( level );
1556
1557  success = _Thread_Wait_flags_try_change_release(
1558    the_thread,
1559    expected_flags,
1560    desired_flags
1561  );
1562
1563  _ISR_Local_enable( level );
1564#endif
1565
1566  return success;
1567}
1568
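/*
 * Illustrative sketch, not part of the original header: how the wait flags
 * cooperate with _Thread_Set_state() and _Thread_Unblock() to avoid a lost
 * wake-up.  Thread dispatching is assumed to be disabled and the necessary
 * locks are held by the callers; the function names are hypothetical and
 * time-outs are omitted.
 */
RTEMS_INLINE_ROUTINE void _Example_Block_on_event( Thread_Control *executing )
{
  _Thread_Wait_flags_set(
    executing,
    THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
  );

  /* ... make the wait visible to other processors, release object locks ... */

  _Thread_Set_state( executing, STATES_WAITING_FOR_EVENT );

  if (
    !_Thread_Wait_flags_try_change_acquire(
      executing,
      THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
      THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_BLOCKED
    )
  ) {
    /* The wait was satisfied before the blocking operation completed */
    _Thread_Unblock( executing );
  }
}

RTEMS_INLINE_ROUTINE void _Example_Satisfy_event( Thread_Control *the_thread )
{
  /* Runs inside a critical section (interrupts disabled) */
  if (
    !_Thread_Wait_flags_try_change_release(
      the_thread,
      THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
      THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_READY_AGAIN
    )
  ) {
    /* The thread already completed the blocking operation */
    _Thread_Wait_flags_set(
      the_thread,
      THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_READY_AGAIN
    );
    _Thread_Unblock( the_thread );
  }
}
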
1569/**
1570 * @brief Returns the object identifier of the object containing the current
1571 * thread wait queue.
1572 *
1573 * This function may be used for debug and system information purposes.  The
1574 * caller must be the owner of the thread lock.
1575 *
1576 * @retval 0 The thread waits on no thread queue currently, the thread wait
1577 *   queue is not contained in an object, or the current thread state provides
1578 *   insufficient information, e.g. the thread is in the middle of a blocking
1579 *   operation.
1580 * @retval other The object identifier of the object containing the thread wait
1581 *   queue.
1582 */
1583Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1584
1585RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
1586  const Thread_Control *the_thread
1587)
1588{
1589  return (Status_Control) the_thread->Wait.return_code;
1590}
1591
1592/**
1593 * @brief General purpose thread wait timeout.
1594 *
1595 * @param[in] watchdog The thread timer watchdog.
1596 */
1597void _Thread_Timeout( Watchdog_Control *watchdog );
1598
1599RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
1600  Thread_Timer_information *timer,
1601  Per_CPU_Control          *cpu
1602)
1603{
1604  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
1605  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
1606  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
1607}
1608
1609RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
1610  Thread_Control                 *the_thread,
1611  Per_CPU_Control                *cpu,
1612  Watchdog_Service_routine_entry  routine,
1613  Watchdog_Interval               ticks
1614)
1615{
1616  ISR_lock_Context lock_context;
1617
1618  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
1619
1620  the_thread->Timer.header =
1621    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
1622  the_thread->Timer.Watchdog.routine = routine;
1623  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );
1624
1625  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
1626}
1627
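/*
 * Illustrative sketch, not part of the original header: arming a relative
 * time-out for the executing thread as part of a blocking operation.
 * _Thread_Timeout() fires on watchdog expiry and ends the wait; the interval
 * is given in clock ticks.  The function name is hypothetical.
 */
RTEMS_INLINE_ROUTINE void _Example_Arm_timeout(
  Thread_Control    *executing,
  Watchdog_Interval  ticks
)
{
  _Thread_Timer_insert_relative(
    executing,
    _Thread_Get_CPU( executing ),
    _Thread_Timeout,
    ticks
  );
}
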
1628RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
1629  Thread_Control                 *the_thread,
1630  Per_CPU_Control                *cpu,
1631  Watchdog_Service_routine_entry  routine,
1632  uint64_t                        expire
1633)
1634{
1635  ISR_lock_Context lock_context;
1636
1637  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
1638
1639  the_thread->Timer.header =
1640    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
1641  the_thread->Timer.Watchdog.routine = routine;
1642  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );
1643
1644  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
1645}
1646
1647RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
1648{
1649  ISR_lock_Context lock_context;
1650
1651  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );
1652
1653  _Watchdog_Per_CPU_remove(
1654    &the_thread->Timer.Watchdog,
1655#if defined(RTEMS_SMP)
1656    the_thread->Timer.Watchdog.cpu,
1657#else
1658    _Per_CPU_Get(),
1659#endif
1660    the_thread->Timer.header
1661  );
1662
1663  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
1664}
1665
1666RTEMS_INLINE_ROUTINE void _Thread_Remove_timer_and_unblock(
1667  Thread_Control     *the_thread,
1668  Thread_queue_Queue *queue
1669)
1670{
1671  _Thread_Wait_tranquilize( the_thread );
1672  _Thread_Timer_remove( the_thread );
1673
1674#if defined(RTEMS_MULTIPROCESSING)
1675  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
1676    _Thread_Unblock( the_thread );
1677  } else {
1678    _Thread_queue_Unblock_proxy( queue, the_thread );
1679  }
1680#else
1681  (void) queue;
1682  _Thread_Unblock( the_thread );
1683#endif
1684}
1685
1686/** @}*/
1687
1688#ifdef __cplusplus
1689}
1690#endif
1691
1692#if defined(RTEMS_MULTIPROCESSING)
1693#include <rtems/score/threadmp.h>
1694#endif
1695
1696#endif
1697/* end of include file */