source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 97312fcc

5
Last change on this file since 97312fcc was 97312fcc, checked in by Sebastian Huber <sebastian.huber@…>, on 04/05/16 at 12:36:30

score: Delete Thread_Wait_information::id

This field was only used by the monitor in non-multiprocessing
configurations. Add new field Thread_Wait_information::remote_id in
multiprocessing configurations and use it for the remote procedure call
thread queue.

Add _Thread_Wait_get_id() to obtain the object identifier for debug and
system information tools. Ensure the object layout via static asserts.
Add test cases to sptests/spthreadq01.

  • Property mode set to 100644
File size: 40.9 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014-2015 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/resourceimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/sysstate.h>
33#include <rtems/score/threadqimpl.h>
34#include <rtems/score/todimpl.h>
35#include <rtems/score/freechain.h>
36#include <rtems/score/watchdogimpl.h>
37#include <rtems/config.h>
38
39#ifdef __cplusplus
40extern "C" {
41#endif
42
43/**
44 * @addtogroup ScoreThread
45 */
46/**@{**/
47
48/**
 *  The following structure contains the information necessary to manage
 *  a thread while it is waiting for a resource.
51 */
52#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
53
54/**
55 *  Self for the GNU Ada Run-Time
56 */
57extern void *rtems_ada_self;
58
59typedef struct {
60  Objects_Information Objects;
61
62  Freechain_Control Free_thread_queue_heads;
63} Thread_Information;
64
65/**
66 *  The following defines the information control block used to
67 *  manage this class of objects.
68 */
69extern Thread_Information _Thread_Internal_information;
70
71/**
72 *  The following points to the thread whose floating point
73 *  context is currently loaded.
74 */
75#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
76extern Thread_Control *_Thread_Allocated_fp;
77#endif
78
79#define THREAD_CHAIN_NODE_TO_THREAD( node ) \
80  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.Chain )
81
82#define THREAD_RBTREE_NODE_TO_THREAD( node ) \
83  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.RBTree )
84
85#if defined(RTEMS_SMP)
86#define THREAD_RESOURCE_NODE_TO_THREAD( node ) \
87  RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node )
88#endif
89
/**
 * @brief Initializes a thread information block.
 *
 * Sets up the object information and the free chain of thread queue heads
 * contained in @a information for the specified API, class and limits.
 */
void _Thread_Initialize_information(
  Thread_Information  *information,
  Objects_APIs         the_api,
  uint16_t             the_class,
  uint32_t             maximum,
  bool                 is_string,
  uint32_t             maximum_name_length
#if defined(RTEMS_MULTIPROCESSING)
  ,
  bool                 supports_global
#endif
);
102
103/**
104 *  @brief Initialize thread handler.
105 *
106 *  This routine performs the initialization necessary for this handler.
107 */
108void _Thread_Handler_initialization(void);
109
110/**
111 *  @brief Create idle thread.
112 *
113 *  This routine creates the idle thread.
114 *
115 *  @warning No thread should be created before this one.
116 */
117void _Thread_Create_idle(void);
118
119/**
120 *  @brief Start thread multitasking.
121 *
122 *  This routine initiates multitasking.  It is invoked only as
123 *  part of initialization and its invocation is the last act of
124 *  the non-multitasking part of the system initialization.
125 */
126void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
127
128/**
129 *  @brief Allocate the requested stack space for the thread.
130 *
131 *  Allocate the requested stack space for the thread.
132 *  Set the Start.stack field to the address of the stack.
133 *
134 *  @param[in] the_thread is the thread where the stack space is requested
135 *  @param[in] stack_size is the stack space is requested
136 *
137 *  @retval actual size allocated after any adjustment
138 *  @retval zero if the allocation failed
139 */
140size_t _Thread_Stack_Allocate(
141  Thread_Control *the_thread,
142  size_t          stack_size
143);
144
145/**
146 *  @brief Deallocate thread stack.
147 *
148 *  Deallocate the Thread's stack.
149 */
150void _Thread_Stack_Free(
151  Thread_Control *the_thread
152);
153
154/**
155 *  @brief Initialize thread.
156 *
157 *  This routine initializes the specified the thread.  It allocates
158 *  all memory associated with this thread.  It completes by adding
159 *  the thread to the local object table so operations on this
160 *  thread id are allowed.
161 *
162 *  @note If stack_area is NULL, it is allocated from the workspace.
163 *
164 *  @note If the stack is allocated from the workspace, then it is
165 *        guaranteed to be of at least minimum size.
166 */
167bool _Thread_Initialize(
168  Thread_Information                   *information,
169  Thread_Control                       *the_thread,
170  const struct Scheduler_Control       *scheduler,
171  void                                 *stack_area,
172  size_t                                stack_size,
173  bool                                  is_fp,
174  Priority_Control                      priority,
175  bool                                  is_preemptible,
176  Thread_CPU_budget_algorithms          budget_algorithm,
177  Thread_CPU_budget_algorithm_callout   budget_callout,
178  uint32_t                              isr_level,
179  Objects_Name                          name
180);
181
182/**
183 *  @brief Initializes thread and executes it.
184 *
185 *  This routine initializes the executable information for a thread
186 *  and makes it ready to execute.  After this routine executes, the
187 *  thread competes with all other threads for CPU time.
188 *
189 *  @param the_thread The thread to be started.
190 *  @param entry The thread entry information.
191 */
192bool _Thread_Start(
193  Thread_Control                 *the_thread,
194  const Thread_Entry_information *entry
195);
196
/**
 * @brief Restarts the thread with the new entry information.
 */
bool _Thread_Restart(
  Thread_Control                 *the_thread,
  Thread_Control                 *executing,
  const Thread_Entry_information *entry
);

/**
 * @brief Yields the executing thread to the scheduler.
 */
void _Thread_Yield( Thread_Control *executing );

/**
 * @brief Sets the thread life protection of the executing thread.
 *
 * NOTE(review): the bool return value presumably reports the previous
 * protection state — confirm against the implementation.
 */
bool _Thread_Set_life_protection( bool protect );

/**
 * @brief Thread action handler used for thread life-state changes
 * (restart/termination) of the executing thread.
 */
void _Thread_Life_action_handler(
  Thread_Control  *executing,
  Thread_Action   *action,
  Per_CPU_Control *cpu,
  ISR_Level        level
);
213
214/**
215 * @brief Kills all zombie threads in the system.
216 *
217 * Threads change into the zombie state as the last step in the thread
218 * termination sequence right before a context switch to the heir thread is
219 * initiated.  Since the thread stack is still in use during this phase we have
220 * to postpone the thread stack reclamation until this point.  On SMP
221 * configurations we may have to busy wait for context switch completion here.
222 */
223void _Thread_Kill_zombies( void );
224
225/**
226 * @brief Closes the thread.
227 *
228 * Closes the thread object and starts the thread termination sequence.  In
229 * case the executing thread is not terminated, then this function waits until
230 * the terminating thread reached the zombie state.
231 */
232void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
233
234/**
235 * @brief Clears the specified thread state.
236 *
237 * In case the previous state is a non-ready state and the next state is the
238 * ready state, then the thread is unblocked by the scheduler.
239 *
240 * @param[in] the_thread The thread.
241 * @param[in] state The state to clear.  It must not be zero.
242 *
243 * @return The previous state.
244 */
245States_Control _Thread_Clear_state(
246  Thread_Control *the_thread,
247  States_Control  state
248);
249
250/**
251 * @brief Sets the specified thread state.
252 *
253 * In case the previous state is the ready state, then the thread is blocked by
254 * the scheduler.
255 *
256 * @param[in] the_thread The thread.
257 * @param[in] state The state to set.  It must not be zero.
258 *
259 * @return The previous state.
260 */
261States_Control _Thread_Set_state(
262  Thread_Control *the_thread,
263  States_Control  state
264);
265
266/**
267 * @brief Clears all thread states.
268 *
269 * In case the previous state is a non-ready state, then the thread is
270 * unblocked by the scheduler.
271 *
272 * @param[in] the_thread The thread.
273 */
RTEMS_INLINE_ROUTINE void _Thread_Ready(
  Thread_Control *the_thread
)
{
  /* Clearing every state bit leaves the thread in the ready state */
  _Thread_Clear_state( the_thread, STATES_ALL_SET );
}
280
281/**
 *  @brief Initializes the environment for a thread.
283 *
284 *  This routine initializes the context of @a the_thread to its
285 *  appropriate starting state.
286 *
287 *  @param[in] the_thread is the pointer to the thread control block.
288 */
289void _Thread_Load_environment(
290  Thread_Control *the_thread
291);
292
293void _Thread_Entry_adaptor_idle( Thread_Control *executing );
294
295void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
296
297void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
298
299/**
300 *  @brief Wrapper function for all threads.
301 *
302 *  This routine is the wrapper function for all threads.  It is
303 *  the starting point for all threads.  The user provided thread
304 *  entry point is invoked by this routine.  Operations
305 *  which must be performed immediately before and after the user's
306 *  thread executes are found here.
307 *
308 *  @note On entry, it is assumed all interrupts are blocked and that this
309 *  routine needs to set the initial isr level.  This may or may not
310 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
312 *  setting the initial isr level properly here is safe.
313 */
314void _Thread_Handler( void );
315
316/**
317 * @brief Executes the global constructors and then restarts itself as the
318 * first initialization thread.
319 *
320 * The first initialization thread is the first RTEMS initialization task or
321 * the first POSIX initialization thread in case no RTEMS initialization tasks
322 * are present.
323 */
324void _Thread_Global_construction(
325  Thread_Control                 *executing,
326  const Thread_Entry_information *entry
327) RTEMS_NO_RETURN;
328
329/**
330 *  @brief Ended the delay of a thread.
331 *
332 *  This routine is invoked when a thread must be unblocked at the
333 *  end of a time based delay (i.e. wake after or wake when).
334 *  It is called by the watchdog handler.
335 *
336 *  @param[in] id is the thread id
337 *  @param[in] ignored is not used
338 */
339void _Thread_Delay_ended(
340  Objects_Id  id,
341  void       *ignored
342);
343
344/**
345 * @brief Returns true if the left thread priority is less than the right
346 * thread priority in the intuitive sense of priority and false otherwise.
347 */
348RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
349  Priority_Control left,
350  Priority_Control right
351)
352{
353  return left > right;
354}
355
356/**
357 * @brief Returns the highest priority of the left and right thread priorities
358 * in the intuitive sense of priority.
359 */
360RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
361  Priority_Control left,
362  Priority_Control right
363)
364{
365  return _Thread_Priority_less_than( left, right ) ? right : left;
366}
367
368/**
369 * @brief Filters a thread priority change.
370 *
371 * Called by _Thread_Change_priority() under the protection of the thread lock.
372 *
373 * @param[in] the_thread The thread.
374 * @param[in, out] new_priority The new priority of the thread.  The filter may
375 * alter this value.
376 * @param[in] arg The argument passed to _Thread_Change_priority().
377 *
378 * @retval true Change the current priority.
379 * @retval false Otherwise.
380 */
381typedef bool ( *Thread_Change_priority_filter )(
382  Thread_Control   *the_thread,
383  Priority_Control *new_priority,
384  void             *arg
385);
386
387/**
388 * @brief Changes the priority of a thread if allowed by the filter function.
389 *
390 * It changes current priority of the thread to the new priority in case the
391 * filter function returns true.  In this case the scheduler is notified of the
392 * priority change as well.
393 *
394 * @param[in] the_thread The thread.
395 * @param[in] new_priority The new priority of the thread.
396 * @param[in] arg The argument for the filter function.
397 * @param[in] filter The filter function to determine if a priority change is
398 * allowed and optionally perform other actions under the protection of the
399 * thread lock simultaneously with the update of the current priority.
400 * @param[in] prepend_it In case this is true, then the thread is prepended to
401 * its priority group in its scheduler instance, otherwise it is appended.
402 */
403void _Thread_Change_priority(
404  Thread_Control                *the_thread,
405  Priority_Control               new_priority,
406  void                          *arg,
407  Thread_Change_priority_filter  filter,
408  bool                           prepend_it
409);
410
411/**
412 * @brief Raises the priority of a thread.
413 *
414 * It changes the current priority of the thread to the new priority if the new
415 * priority is higher than the current priority.  In this case the thread is
416 * appended to its new priority group in its scheduler instance.
417 *
418 * @param[in] the_thread The thread.
419 * @param[in] new_priority The new priority of the thread.
420 *
421 * @see _Thread_Change_priority().
422 */
423void _Thread_Raise_priority(
424  Thread_Control   *the_thread,
425  Priority_Control  new_priority
426);
427
428/**
429 * @brief Inherit the priority of a thread.
430 *
431 * It changes the current priority of the inheritor thread to the current priority
432 * of the ancestor thread if it is higher than the current priority of the inheritor
433 * thread.  In this case the inheritor thread is appended to its new priority group
434 * in its scheduler instance.
435 *
436 * On SMP configurations, the priority is changed to PRIORITY_PSEUDO_ISR in
437 * case the own schedulers of the inheritor and ancestor thread differ (priority
438 * boosting).
439 *
440 * @param[in] inheritor The thread to inherit the priority.
441 * @param[in] ancestor The thread to bequeath its priority to the inheritor
442 *   thread.
443 */
#if defined(RTEMS_SMP)
void _Thread_Inherit_priority(
  Thread_Control *inheritor,
  Thread_Control *ancestor
);
#else
RTEMS_INLINE_ROUTINE void _Thread_Inherit_priority(
  Thread_Control *inheritor,
  Thread_Control *ancestor
)
{
  /*
   * On uniprocessor configurations there is only one scheduler instance, so
   * raising to the ancestor's current priority is sufficient.
   */
  _Thread_Raise_priority( inheritor, ancestor->current_priority );
}
#endif
458
459/**
460 * @brief Sets the current to the real priority of a thread.
461 *
462 * Sets the priority restore hint to false.
463 */
464void _Thread_Restore_priority( Thread_Control *the_thread );
465
466/**
467 * @brief Sets the priority of a thread.
468 *
469 * It sets the real priority of the thread.  In addition it changes the current
470 * priority of the thread if the new priority is higher than the current
471 * priority or the thread owns no resources.
472 *
473 * @param[in] the_thread The thread.
474 * @param[in] new_priority The new priority of the thread.
475 * @param[out] old_priority The old real priority of the thread.  This pointer
476 * must not be @c NULL.
477 * @param[in] prepend_it In case this is true, then the thread is prepended to
478 * its priority group in its scheduler instance, otherwise it is appended.
479 *
480 * @see _Thread_Change_priority().
481 */
482void _Thread_Set_priority(
483  Thread_Control   *the_thread,
484  Priority_Control  new_priority,
485  Priority_Control *old_priority,
486  bool              prepend_it
487);
488
489/**
490 *  @brief Maps thread Id to a TCB pointer.
491 *
492 *  This function maps thread IDs to thread control
493 *  blocks.  If ID corresponds to a local thread, then it
494 *  returns the_thread control pointer which maps to ID
495 *  and @a location is set to OBJECTS_LOCAL.  If the thread ID is
496 *  global and resides on a remote node, then location is set
497 *  to OBJECTS_REMOTE, and the_thread is undefined.
498 *  Otherwise, location is set to OBJECTS_ERROR and
499 *  the_thread is undefined.
500 *
501 *  @param[in] id is the id of the thread.
502 *  @param[in] location is the location of the block.
503 *
504 *  @note  The performance of many RTEMS services depends upon
505 *         the quick execution of the "good object" path in this
506 *         routine.  If there is a possibility of saving a few
507 *         cycles off the execution time, this routine is worth
508 *         further optimization attention.
509 */
510Thread_Control *_Thread_Get (
511  Objects_Id         id,
512  Objects_Locations *location
513);
514
515/**
516 * @brief Gets a thread by its identifier.
517 *
518 * @see _Objects_Get_isr_disable().
519 */
520Thread_Control *_Thread_Get_interrupt_disable(
521  Objects_Id         id,
522  Objects_Locations *location,
523  ISR_lock_Context  *lock_context
524);
525
/**
 * @brief Returns the processor of the thread.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  /* On SMP each thread records the processor assigned to it */
  return thread->Scheduler.cpu;
#else
  (void) thread;

  /* On uniprocessor configurations there is only the current processor */
  return _Per_CPU_Get();
#endif
}
538
/**
 * @brief Sets the processor of the thread.
 *
 * On uniprocessor configurations this is a no-op.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
551
552/**
553 * This function returns true if the_thread is the currently executing
554 * thread, and false otherwise.
555 */
556
557RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
558  const Thread_Control *the_thread
559)
560{
561  return ( the_thread == _Thread_Executing );
562}
563
564#if defined(RTEMS_SMP)
565/**
566 * @brief Returns @a true in case the thread executes currently on some
567 * processor in the system, otherwise @a false.
568 *
569 * Do not confuse this with _Thread_Is_executing() which checks only the
570 * current processor.
571 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  /* Query the is-executing indicator maintained in the thread's context */
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
578#endif
579
580/**
581 * This function returns true if the_thread is the heir
582 * thread, and false otherwise.
583 */
584
585RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
586  const Thread_Control *the_thread
587)
588{
589  return ( the_thread == _Thread_Heir );
590}
591
592/**
593 * This routine clears any blocking state for the_thread.  It performs
594 * any necessary scheduling operations including the selection of
595 * a new heir thread.
596 */
597
RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  /* Clearing all blocking state bits lets the scheduler unblock the thread */
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}
604
605/**
606 * This routine resets the current context of the calling thread
607 * to that of its initial state.
608 */
609
RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  /* Give up the Giant lock before the context is discarded */
  _Giant_Release( _Per_CPU_Get() );

  /* Interrupts stay disabled; the restart never returns here */
  _ISR_Disable_without_giant( level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  /* Reload the initial floating point context, if the thread uses one */
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif

  /* Restart execution from the thread's initial context; does not return */
  _CPU_Context_Restart_self( &executing->Registers );
}
628
629/**
630 * This function returns true if the floating point context of
631 * the_thread is currently loaded in the floating point unit, and
632 * false otherwise.
633 */
634
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  /* True if this thread's FP context currently occupies the FP unit */
  return ( the_thread == _Thread_Allocated_fp );
}
#endif
643
644/*
645 *  If the CPU has hardware floating point, then we must address saving
646 *  and restoring it as part of the context switch.
647 *
648 *  The second conditional compilation section selects the algorithm used
649 *  to context switch between floating point tasks.  The deferred algorithm
650 *  can be significantly better in a system with few floating point tasks
651 *  because it reduces the total number of save and restore FP context
652 *  operations.  However, this algorithm can not be used on all CPUs due
653 *  to unpredictable use of FP registers by some compilers for integer
654 *  operations.
655 */
656
/**
 * @brief Saves the floating point context of the executing thread.
 *
 * With the deferred FP switch algorithm the save is postponed until another
 * FP thread actually needs the unit, so nothing is done here in that case.
 */
RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}
666
/**
 * @brief Restores the floating point context of the executing thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  /*
   * Deferred switch: only save/restore when an FP thread runs whose context
   * is not the one currently loaded in the FP unit.
   */
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  /* Non-deferred switch: unconditionally restore the thread's FP context */
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}
684
685/**
686 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
688 */
689
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  /* Mark the FP unit as owned by no thread */
  _Thread_Allocated_fp = NULL;
}
#endif
696
/**
 * This function returns true if a thread dispatch is considered necessary,
 * and false otherwise.  (The previous comment incorrectly claimed it tested
 * whether dispatching is disabled.)
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
706
707/**
708 * This function returns true if the_thread is NULL and false otherwise.
709 */
710
711RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
712  const Thread_Control *the_thread
713)
714{
715  return ( the_thread == NULL );
716}
717
/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the status code indicates that a proxy is
 * blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}
729
/**
 * @brief Returns the maximum count of internal threads.
 */
RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
{
  /* One idle thread exists per configured processor */
  uint32_t maximum_internal_threads =
    rtems_configuration_get_maximum_processors();

  /* Account for the MPCI thread on multiprocessing systems */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    ++maximum_internal_threads;
  }
#endif

  return maximum_internal_threads;
}
745
/**
 * @brief Allocates an internal thread object.
 *
 * NOTE(review): uses the unprotected allocator — the caller presumably
 * provides object allocator protection; confirm at the call sites.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}
751
752/**
753 * @brief Gets the heir of the processor and makes it executing.
754 *
755 * Must be called with interrupts disabled.  The thread dispatch necessary
756 * indicator is cleared as a side-effect.
757 *
758 * @return The heir thread.
759 *
760 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
761 * _Thread_Dispatch_update_heir().
762 */
763RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
764  Per_CPU_Control *cpu_self
765)
766{
767  Thread_Control *heir;
768
769  heir = cpu_self->heir;
770  cpu_self->dispatch_necessary = false;
771  cpu_self->executing = heir;
772
773  return heir;
774}
775
/**
 * @brief Adds the time elapsed since the last CPU usage timestamp of the
 * processor to the CPU time used of the thread and refreshes the timestamp.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
  Timestamp_Control last;
  Timestamp_Control ran;

  /* Elapsed time is measured between the previous and the fresh uptime */
  last = cpu->cpu_usage_timestamp;
  _TOD_Get_uptime( &cpu->cpu_usage_timestamp );
  _Timestamp_Subtract( &last, &cpu->cpu_usage_timestamp, &ran );
  _Timestamp_Add_to( &the_thread->cpu_time_used, &ran );
}
789
#if defined( RTEMS_SMP )
/**
 * @brief Installs a new heir thread on a processor.
 *
 * Charges the CPU time used so far to the previous heir, installs the new
 * heir, and then either marks a thread dispatch as necessary (own processor)
 * or sends an inter-processor interrupt (remote processor).
 */
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  _Thread_Update_CPU_time_used( cpu_for_heir->heir, cpu_for_heir );

  cpu_for_heir->heir = heir;

  if ( cpu_for_heir == cpu_self ) {
    cpu_self->dispatch_necessary = true;
  } else {
    _Per_CPU_Send_interrupt( cpu_for_heir );
  }
}
#endif
808
/**
 * @brief Obtains the CPU time used by the thread.
 */
void _Thread_Get_CPU_time_used(
  Thread_Control    *the_thread,
  Timestamp_Control *cpu_time_used
);
813
/**
 * @brief Initializes a thread action control to the empty state.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}
820
/**
 * @brief Initializes a thread action as off-chain (not pending).
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}
827
/**
 * @brief Disables interrupts and acquires the current per-CPU control.
 *
 * @return The per-CPU control of the current processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *
  _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level )
{
  Per_CPU_Control *cpu;

  /* Interrupts must be disabled before the per-CPU lock is taken */
  _ISR_Disable_without_giant( *level );
  cpu = _Per_CPU_Get();
  _Per_CPU_Acquire( cpu );

  return cpu;
}
839
/**
 * @brief Disables interrupts and acquires the per-CPU control of the
 * processor the thread is assigned to.
 *
 * @return The per-CPU control of the thread's processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire(
  Thread_Control *thread,
  ISR_Level      *level
)
{
  Per_CPU_Control *cpu;

  /* Interrupts must be disabled before the per-CPU lock is taken */
  _ISR_Disable_without_giant( *level );
  cpu = _Thread_Get_CPU( thread );
  _Per_CPU_Acquire( cpu );

  return cpu;
}
853
/**
 * @brief Releases the per-CPU control and restores the interrupt level.
 *
 * Counterpart of _Thread_Action_ISR_disable_and_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
  Per_CPU_Control *cpu,
  ISR_Level level
)
{
  _Per_CPU_Release_and_ISR_enable( cpu, level );
}
861
/**
 * @brief Adds a post-switch action to the thread and triggers a dispatch.
 *
 * Installs @a handler into @a action and appends the action to the thread's
 * post-switch action chain (only if not already pending).  The thread's
 * processor is made dispatch-necessary, via an inter-processor interrupt if
 * the thread runs on another processor.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;
  ISR_Level        level;

  cpu_of_thread = _Thread_Action_ISR_disable_and_acquire( thread, &level );

  action->handler = handler;

#if defined(RTEMS_SMP)
  /* The thread may execute on another processor; interrupt it if so */
  if ( _Per_CPU_Get() == cpu_of_thread ) {
    cpu_of_thread->dispatch_necessary = true;
  } else {
    _Per_CPU_Send_interrupt( cpu_of_thread );
  }
#else
  cpu_of_thread->dispatch_necessary = true;
#endif

  /* Avoid double-insertion for actions that are already pending */
  _Chain_Append_if_is_off_chain_unprotected(
    &thread->Post_switch_actions.Chain,
    &action->Node
  );

  _Thread_Action_release_and_ISR_enable( cpu_of_thread, level );
}
892
/** @brief Returns true if the restarting bit is set in the life state. */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

/** @brief Returns true if the terminating bit is set in the life state. */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

/** @brief Returns true if the protected bit is set in the life state. */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_PROTECTED ) != 0;
}

/** @brief Returns true if a restart or termination is pending. */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING_TERMINATING ) != 0;
}
920
921/**
922 * @brief Returns true if the thread owns resources, and false otherwise.
923 *
924 * Resources are accounted with the Thread_Control::resource_count resource
925 * counter.  This counter is used by semaphore objects for example.
926 *
927 * In addition to the resource counter there is a resource dependency tree
928 * available on SMP configurations.  In case this tree is non-empty, then the
929 * thread owns resources.
930 *
931 * @param[in] the_thread The thread.
932 */
933RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
934  const Thread_Control *the_thread
935)
936{
937  bool owns_resources = the_thread->resource_count != 0;
938
939#if defined(RTEMS_SMP)
940  owns_resources = owns_resources
941    || _Resource_Node_owns_resources( &the_thread->Resource_node );
942#endif
943
944  return owns_resources;
945}
946
947/**
948 * @brief Acquires the default thread lock inside a critical section
949 * (interrupts disabled).
950 *
951 * @param[in] the_thread The thread.
952 * @param[in] lock_context The lock context used for the corresponding lock
953 * release.
954 *
955 * @see _Thread_Lock_release_default().
956 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* The caller must have disabled interrupts already */
  _Assert( _ISR_Get_level() != 0 );
#if defined(RTEMS_SMP)
  /* Lock statistics are charged to the executing thread */
  _SMP_ticket_lock_Acquire(
    &the_thread->Lock.Default,
    &_Thread_Executing->Lock.Stats,
    &lock_context->Lock_context.Stats_context
  );
#else
  /* Disabled interrupts provide all necessary protection on uniprocessor */
  (void) the_thread;
  (void) lock_context;
#endif
}
974
975/**
976 * @brief Acquires the default thread lock and returns the executing thread.
977 *
978 * @param[in] lock_context The lock context used for the corresponding lock
979 * release.
980 *
981 * @return The executing thread.
982 *
983 * @see _Thread_Lock_release_default().
984 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Lock_acquire_default_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  /* Disable interrupts first so the executing thread cannot change under us */
  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_Lock_acquire_default_critical( executing, lock_context );

  return executing;
}
997
998/**
999 * @brief Acquires the default thread lock.
1000 *
1001 * @param[in] the_thread The thread.
1002 * @param[in] lock_context The lock context used for the corresponding lock
1003 * release.
1004 *
1005 * @see _Thread_Lock_release_default().
1006 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /* Disable interrupts, then take the default lock in the critical section */
  _ISR_lock_ISR_disable( lock_context );
  _Thread_Lock_acquire_default_critical( the_thread, lock_context );
}
1015
1016/**
1017 * @brief Releases the thread lock inside a critical section (interrupts
1018 * disabled).
1019 *
1020 * The previous interrupt status is not restored.
1021 *
1022 * @param[in] lock The lock.
1023 * @param[in] lock_context The lock context used for the corresponding lock
1024 * acquire.
1025 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_critical(
  void             *lock,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  /* The opaque lock pointer is the SMP ticket lock taken at acquire time */
  _SMP_ticket_lock_Release(
    (SMP_ticket_lock_Control *) lock,
    &lock_context->Lock_context.Stats_context
  );
#else
  (void) lock;
  (void) lock_context;
#endif
}
1041
1042/**
1043 * @brief Releases the thread lock.
1044 *
1045 * @param[in] lock The lock returned by _Thread_Lock_acquire().
1046 * @param[in] lock_context The lock context used for _Thread_Lock_acquire().
1047 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release(
  void             *lock,
  ISR_lock_Context *lock_context
)
{
  /* Release the lock first, then restore the previous interrupt status */
  _Thread_Lock_release_critical( lock, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
1056
/**
 * @brief Releases the default thread lock inside a critical section
 * (interrupts disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_critical(
#if defined(RTEMS_SMP)
    &the_thread->Lock.Default,
#else
    /* The lock pointer is unused in non-SMP configurations */
    NULL,
#endif
    lock_context
  );
}
1081
/**
 * @brief Releases the default thread lock and restores the interrupt status.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_default_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}
1097
/**
 * @brief Acquires the thread lock.
 *
 * The current thread lock may change while it is being acquired, e.g. via
 * _Thread_Lock_set().  A generation counter is used to detect such changes
 * and the acquire is retried until a consistent lock is obtained.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for _Thread_Lock_release().
 *
 * @return The lock required by _Thread_Lock_release().
 */
RTEMS_INLINE_ROUTINE void *_Thread_Lock_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  SMP_ticket_lock_Control *lock;

  while ( true ) {
    unsigned int first_generation;
    unsigned int second_generation;

    _ISR_lock_ISR_disable( lock_context );

    /*
     * Ensure that we read our first lock generation before we obtain our
     * current lock.  See _Thread_Lock_set_unprotected().
     */
    first_generation = _Atomic_Load_uint(
      &the_thread->Lock.generation,
      ATOMIC_ORDER_ACQUIRE
    );

    lock = the_thread->Lock.current;
    _SMP_ticket_lock_Acquire(
      lock,
      &_Thread_Executing->Lock.Stats,
      &lock_context->Lock_context.Stats_context
    );

    /*
     * The C11 memory model doesn't guarantee that we read the latest
     * generation here.  For this a read-modify-write operation would be
     * necessary.  We read at least the new generation set up by the owner of
     * our current thread lock, and so on.
     */
    second_generation = _Atomic_Load_uint(
      &the_thread->Lock.generation,
      ATOMIC_ORDER_ACQUIRE
    );

    if ( first_generation == second_generation ) {
      return lock;
    }

    /* The thread lock changed while we acquired it: release and retry */
    _Thread_Lock_release( lock, lock_context );
  }
#else
  /* Without SMP disabling interrupts is sufficient, there is no lock object */
  _ISR_Disable( lock_context->isr_level );

  return NULL;
#endif
}
1159
#if defined(RTEMS_SMP)
/*
 * Internal function, use _Thread_Lock_set() or _Thread_Lock_restore_default()
 * instead.  Installs a new current thread lock and bumps the generation
 * counter so that concurrent _Thread_Lock_acquire() calls notice the change.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected(
  Thread_Control          *the_thread,
  SMP_ticket_lock_Control *new_lock
)
{
  the_thread->Lock.current = new_lock;

  /*
   * The generation release corresponds to the generation acquire in
   * _Thread_Lock_acquire() and ensures that the new lock and other fields are
   * visible to the next thread lock owner.  Otherwise someone would be able to
   * read an up to date generation number and an old lock.  See
   * _Thread_Wait_set_queue() and _Thread_Wait_restore_default_operations().
   *
   * Since we set a new lock right before, this increment is not protected by a
   * lock and thus must be an atomic operation.
   */
  _Atomic_Fetch_add_uint(
    &the_thread->Lock.generation,
    1,
    ATOMIC_ORDER_RELEASE
  );
}
#endif
1189
/**
 * @brief Sets a new thread lock.
 *
 * The caller must not be the owner of the default thread lock.  The caller
 * must be the owner of the new lock.  This is a no-operation in non-SMP
 * configurations.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_lock The new thread lock.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_set(
  Thread_Control          *the_thread,
  SMP_ticket_lock_Control *new_lock
)
{
  ISR_lock_Context lock_context;

  _Thread_Lock_acquire_default_critical( the_thread, &lock_context );
  /* A new lock may only be installed while the default lock is current */
  _Assert( the_thread->Lock.current == &the_thread->Lock.Default );
  _Thread_Lock_set_unprotected( the_thread, new_lock );
  _Thread_Lock_release_default_critical( the_thread, &lock_context );
}
#else
#define _Thread_Lock_set( the_thread, new_lock ) \
  do { } while ( 0 )
#endif
1216
/**
 * @brief Restores the default thread lock.
 *
 * The caller must be the owner of the current thread lock.  This is a
 * no-operation in non-SMP configurations.
 *
 * @param[in] the_thread The thread.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_restore_default(
  Thread_Control *the_thread
)
{
  _Thread_Lock_set_unprotected( the_thread, &the_thread->Lock.Default );
}
#else
#define _Thread_Lock_restore_default( the_thread ) \
  do { } while ( 0 )
#endif
1235
/**
 * @brief The initial thread wait flags value set by _Thread_Initialize().
 */
#define THREAD_WAIT_FLAGS_INITIAL 0x0U

/**
 * @brief Mask to get the thread wait state flags.
 *
 * The wait state flags occupy the least-significant byte.
 */
#define THREAD_WAIT_STATE_MASK 0xffU

/**
 * @brief Indicates that the thread begins with the blocking operation.
 *
 * A blocking operation consists of an optional watchdog initialization and the
 * setting of the appropriate thread blocking state with the corresponding
 * scheduler block operation.
 */
#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U

/**
 * @brief Indicates that the thread completed the blocking operation.
 */
#define THREAD_WAIT_STATE_BLOCKED 0x2U

/**
 * @brief Indicates that a condition to end the thread wait occurred.
 *
 * This could be a timeout, a signal, an event or a resource availability.
 */
#define THREAD_WAIT_STATE_READY_AGAIN 0x4U

/**
 * @brief Mask to get the thread wait class flags.
 *
 * The wait class flags occupy the second least-significant byte.
 */
#define THREAD_WAIT_CLASS_MASK 0xff00U

/**
 * @brief Indicates that the thread waits for an event.
 */
#define THREAD_WAIT_CLASS_EVENT 0x100U

/**
 * @brief Indicates that the thread waits for a system event.
 */
#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U

/**
 * @brief Indicates that the thread waits for an object.
 */
#define THREAD_WAIT_CLASS_OBJECT 0x400U

/**
 * @brief Indicates that the thread waits for a period.
 */
#define THREAD_WAIT_CLASS_PERIOD 0x800U
1291
/**
 * @brief Sets the thread wait flags.
 *
 * In SMP configurations the store is atomic with relaxed ordering.
 *
 * @param[in] the_thread The thread.
 * @param[in] flags The new thread wait flags.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}
1303
/**
 * @brief Returns the current thread wait flags.
 *
 * In SMP configurations the load is atomic with relaxed ordering.
 *
 * @param[in] the_thread The thread.
 *
 * @return The thread wait flags.
 */
RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}
1314
1315/**
1316 * @brief Tries to change the thread wait flags inside a critical section
1317 * (interrupts disabled).
1318 *
1319 * In case the wait flags are equal to the expected wait flags, then the wait
1320 * flags are set to the desired wait flags.
1321 *
1322 * @param[in] the_thread The thread.
1323 * @param[in] expected_flags The expected wait flags.
1324 * @param[in] desired_flags The desired wait flags.
1325 *
1326 * @retval true The wait flags were equal to the expected wait flags.
1327 * @retval false Otherwise.
1328 */
1329RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_critical(
1330  Thread_Control    *the_thread,
1331  Thread_Wait_flags  expected_flags,
1332  Thread_Wait_flags  desired_flags
1333)
1334{
1335#if defined(RTEMS_SMP)
1336  return _Atomic_Compare_exchange_uint(
1337    &the_thread->Wait.flags,
1338    &expected_flags,
1339    desired_flags,
1340    ATOMIC_ORDER_RELAXED,
1341    ATOMIC_ORDER_RELAXED
1342  );
1343#else
1344  bool success = the_thread->Wait.flags == expected_flags;
1345
1346  if ( success ) {
1347    the_thread->Wait.flags = desired_flags;
1348  }
1349
1350  return success;
1351#endif
1352}
1353
/**
 * @brief Tries to change the thread wait flags.
 *
 * In non-SMP configurations interrupts are disabled around the operation to
 * make the compare and store atomic with respect to interrupts.
 *
 * @see _Thread_Wait_flags_try_change_critical().
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  bool success;
#if !defined(RTEMS_SMP)
  ISR_Level level;

  _ISR_Disable_without_giant( level );
#endif

  success = _Thread_Wait_flags_try_change_critical(
    the_thread,
    expected_flags,
    desired_flags
  );

#if !defined(RTEMS_SMP)
  _ISR_Enable_without_giant( level );
#endif

  return success;
}
1384
/**
 * @brief Sets the thread queue.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_queue The new queue.
 *
 * @see _Thread_Lock_set() and _Thread_Lock_set_unprotected().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_queue(
  Thread_Control     *the_thread,
  Thread_queue_Queue *new_queue
)
{
  the_thread->Wait.queue = new_queue;
}
1402
/**
 * @brief Sets the thread queue operations.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_operations The new queue operations.
 *
 * @see _Thread_Lock_set() and _Thread_Wait_restore_default_operations().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_operations(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *new_operations
)
{
  the_thread->Wait.operations = new_operations;
}
1420
/**
 * @brief Restores the default thread queue operations.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Thread_Wait_set_operations().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default_operations(
  Thread_Control *the_thread
)
{
  the_thread->Wait.operations = &_Thread_queue_Operations_default;
}
1436
/**
 * @brief Sets the thread wait timeout code.
 *
 * @param[in] the_thread The thread.
 * @param[in] timeout_code The new thread wait timeout code.
 *
 * @see _Thread_Timeout().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_timeout_code(
  Thread_Control *the_thread,
  uint32_t        timeout_code
)
{
  the_thread->Wait.timeout_code = timeout_code;
}
1450
/**
 * @brief Helper structure to ensure that all objects containing a thread queue
 * have the right layout.
 *
 * @see _Thread_Wait_get_id() and THREAD_WAIT_QUEUE_OBJECT_ASSERT().
 */
typedef struct {
  Objects_Control      Object;
  Thread_queue_Control Wait_queue;
} Thread_Wait_queue_object;

/**
 * @brief Asserts at compile time that the object type embeds its thread queue
 * at the same offset and with the same type as Thread_Wait_queue_object.
 *
 * @see _Thread_Wait_get_id().
 */
#define THREAD_WAIT_QUEUE_OBJECT_ASSERT( object_type, wait_queue_member ) \
  RTEMS_STATIC_ASSERT( \
    offsetof( object_type, wait_queue_member ) \
      == offsetof( Thread_Wait_queue_object, Wait_queue ) \
    && ( &( ( (object_type *) 0 )->wait_queue_member ) \
      == ( &( (Thread_Wait_queue_object *) 0 )->Wait_queue ) ), \
    object_type \
  )
1470
/**
 * @brief Returns the object identifier of the object containing the current
 * thread wait queue.
 *
 * This function may be used for debug and system information purposes.  The
 * caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 *
 * @retval 0 The thread waits on no thread queue currently, the thread wait
 *   queue is not contained in an object, or the current thread state provides
 *   insufficient information, e.g. the thread is in the middle of a blocking
 *   operation.
 * @retval other The object identifier of the object containing the thread wait
 *   queue.
 */
Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
1486
/**
 * @brief General purpose thread wait timeout.
 *
 * @param[in] watchdog The thread timer watchdog.
 *
 * @see _Thread_Timer_insert_relative() and _Thread_Timer_insert_absolute().
 */
void _Thread_Timeout( Watchdog_Control *watchdog );
1493
/**
 * @brief Initializes the thread timer information.
 *
 * The watchdog header defaults to the relative (ticks based) header of the
 * specified processor.
 *
 * @param[in] timer The thread timer information to initialize.
 * @param[in] cpu The processor providing the watchdog header.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
  Thread_Timer_information *timer,
  Per_CPU_Control          *cpu
)
{
  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
}
1503
/**
 * @brief Inserts the thread timer watchdog into the relative (ticks based)
 * watchdog header of the specified processor.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine invoked on expiry.
 * @param[in] ticks The watchdog interval in ticks.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  Watchdog_Interval               ticks
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_relative( &the_thread->Timer.Watchdog, cpu, ticks );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1521
/**
 * @brief Inserts the thread timer watchdog into the absolute watchdog header
 * of the specified processor.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor providing the watchdog header.
 * @param[in] routine The watchdog service routine invoked on expiry.
 * @param[in] expire The absolute expiration time.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_absolute(
  Thread_Control                 *the_thread,
  Per_CPU_Control                *cpu,
  Watchdog_Service_routine_entry  routine,
  uint64_t                        expire
)
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  the_thread->Timer.header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_ABSOLUTE ];
  the_thread->Timer.Watchdog.routine = routine;
  _Watchdog_Per_CPU_insert_absolute( &the_thread->Timer.Watchdog, cpu, expire );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1539
/**
 * @brief Removes the thread timer watchdog from its watchdog header.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Timer_remove( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _ISR_lock_ISR_disable_and_acquire( &the_thread->Timer.Lock, &lock_context );

  _Watchdog_Per_CPU_remove(
    &the_thread->Timer.Watchdog,
#if defined(RTEMS_SMP)
    /* Use the processor recorded in the watchdog itself */
    the_thread->Timer.Watchdog.cpu,
#else
    _Per_CPU_Get(),
#endif
    the_thread->Timer.header
  );

  _ISR_lock_Release_and_ISR_enable( &the_thread->Timer.Lock, &lock_context );
}
1558
/**
 * @brief Records the real processor of a thread for debug purposes.
 *
 * This is a no-operation unless both RTEMS_SMP and RTEMS_DEBUG are defined.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpu The processor.
 */
RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
  the_thread->Scheduler.debug_real_cpu = cpu;
#else
  (void) the_thread;
  (void) cpu;
#endif
}
1571
1572/** @}*/
1573
1574#ifdef __cplusplus
1575}
1576#endif
1577
1578#if defined(RTEMS_MULTIPROCESSING)
1579#include <rtems/score/threadmp.h>
1580#endif
1581
1582#endif
1583/* end of include file */
Note: See TracBrowser for help on using the repository browser.