source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 38b59a6

4.115
Last change on this file since 38b59a6 was 38b59a6, checked in by Sebastian Huber <sebastian.huber@…>, on 05/02/14 at 08:31:09

score: Implement forced thread migration

The current implementation of task migration in RTEMS has some
implications with respect to the interrupt latency. It is crucial to
preserve the system invariant that a task can execute on at most one
processor in the system at a time. This is accomplished with a boolean
indicator in the task context. The processor architecture specific
low-level task context switch code will mark that a task context is no
longer executing and waits until the heir context has stopped execution
before it restores the heir context and resumes execution of the heir
task. So there is one point in time in which a processor is without a
task. This is essential to avoid cyclic dependencies in case multiple
tasks migrate at once. Otherwise some supervising entity is necessary to
prevent livelocks. Such a global supervisor would lead to scalability
problems so this approach is not used. Currently the thread dispatch is
performed with interrupts disabled. So in case the heir task is
currently executing on another processor then this prolongs the time of
disabled interrupts since one processor has to wait for another
processor to make progress.

It is difficult to avoid this issue with the interrupt latency since
interrupts normally store the context of the interrupted task on its
stack. In case a task is marked as not executing we must not use its
task stack to store such an interrupt context. We cannot use the heir
stack before it stopped execution on another processor. So if we enable
interrupts during this transition we have to provide an alternative task
independent stack for this time frame. This issue needs further
investigation.

  • Property mode set to 100644
File size: 21.1 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/chainimpl.h>
26#include <rtems/score/interr.h>
27#include <rtems/score/isr.h>
28#include <rtems/score/objectimpl.h>
29#include <rtems/score/statesimpl.h>
30#include <rtems/score/sysstate.h>
31#include <rtems/score/todimpl.h>
32#include <rtems/config.h>
33
34#ifdef __cplusplus
35extern "C" {
36#endif
37
/**
 * @addtogroup ScoreThread
 */
/**@{**/

/**
 *  Status constant which indicates that a proxy is blocking while it waits
 *  for a resource.  See _Thread_Is_proxy_blocking().
 */
#define THREAD_STATUS_PROXY_BLOCKING 0x1111111

/**
 *  Self for the GNU Ada Run-Time.
 */
SCORE_EXTERN void *rtems_ada_self;

/**
 *  The following defines the information control block used to
 *  manage this class of objects.
 */
SCORE_EXTERN Objects_Information _Thread_Internal_information;

/**
 *  The following points to the thread whose floating point
 *  context is currently loaded.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
SCORE_EXTERN Thread_Control *_Thread_Allocated_fp;
#endif

#if !defined(__DYNAMIC_REENT__)
/**
 * The C library re-entrant global pointer.  Some C library implementations
 * such as newlib have a single global pointer that is changed during a
 * context switch.  This pointer points to that global pointer.  The thread
 * control block holds a pointer to the task specific data.
 */
SCORE_EXTERN struct _reent **_Thread_libc_reent;
#endif
77
/**
 *  @brief Initialize thread handler.
 *
 *  This routine performs the initialization necessary for this handler.
 */
void _Thread_Handler_initialization(void);

/**
 *  @brief Create idle thread.
 *
 *  This routine creates the idle thread.
 *
 *  @warning No thread should be created before this one.
 */
void _Thread_Create_idle(void);

/**
 *  @brief Start thread multitasking.
 *
 *  This routine initiates multitasking.  It is invoked only as
 *  part of initialization and its invocation is the last act of
 *  the non-multitasking part of the system initialization.
 */
void _Thread_Start_multitasking( void ) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/**
 *  @brief Allocate the requested stack space for the thread.
 *
 *  Allocate the requested stack space for the thread.
 *  Set the Start.stack field to the address of the stack.
 *
 *  @param[in] the_thread is the thread where the stack space is requested
 *  @param[in] stack_size is the requested stack space size in bytes
 *
 *  @retval actual size allocated after any adjustment
 *  @retval zero if the allocation failed
 */
size_t _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  size_t          stack_size
);

/**
 *  @brief Deallocate thread stack.
 *
 *  Deallocate the Thread's stack.
 */
void _Thread_Stack_Free(
  Thread_Control *the_thread
);

/**
 *  @brief Initialize thread.
 *
 *  This routine initializes the specified thread.  It allocates
 *  all memory associated with this thread.  It completes by adding
 *  the thread to the local object table so operations on this
 *  thread id are allowed.
 *
 *  @note If stack_area is NULL, it is allocated from the workspace.
 *
 *  @note If the stack is allocated from the workspace, then it is
 *        guaranteed to be of at least minimum size.
 */
bool _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  const struct Scheduler_Control       *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
);

/**
 *  @brief Initializes thread and executes it.
 *
 *  This routine initializes the executable information for a thread
 *  and makes it ready to execute.  After this routine executes, the
 *  thread competes with all other threads for CPU time.
 *
 *  @param the_thread is the thread to be initialized
 *  @param the_prototype selects which entry arguments are used
 *  @param entry_point is the thread entry function
 *  @param pointer_argument is the pointer argument for the entry function
 *  @param numeric_argument is the numeric argument for the entry function
 *  @param[in,out] cpu The processor if used to start an idle thread
 *  during system initialization.  Must be set to @c NULL to start a normal
 *  thread.
 */
bool _Thread_Start(
  Thread_Control            *the_thread,
  Thread_Start_types         the_prototype,
  void                      *entry_point,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument,
  Per_CPU_Control           *cpu
);

/**
 *  @brief Restarts the thread with the new entry arguments.
 *
 *  NOTE(review): implemented elsewhere -- confirm exact restart semantics
 *  (interaction with thread life protection) against the implementation.
 */
bool _Thread_Restart(
  Thread_Control            *the_thread,
  Thread_Control            *executing,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument
);

/**
 *  @brief Sets the thread life protection state of the executing thread.
 *
 *  @param[in] protect is the new protection state
 *
 *  @return Presumably the previous protection state -- confirm against the
 *  implementation.
 */
bool _Thread_Set_life_protection( bool protect );

/**
 *  @brief Thread action handler for thread life changes (restart and
 *  termination); implemented elsewhere in the thread handler.
 */
void _Thread_Life_action_handler(
  Thread_Control  *executing,
  Thread_Action   *action,
  Per_CPU_Control *cpu,
  ISR_Level        level
);
196
/**
 * @brief Kills all zombie threads in the system.
 *
 * Threads change into the zombie state as the last step in the thread
 * termination sequence right before a context switch to the heir thread is
 * initiated.  Since the thread stack is still in use during this phase we have
 * to postpone the thread stack reclamation until this point.  On SMP
 * configurations we may have to busy wait for context switch completion here.
 */
void _Thread_Kill_zombies( void );

/**
 * @brief Closes the thread.
 *
 * Closes the thread object and starts the thread termination sequence.  In
 * case the executing thread is not terminated, then this function waits until
 * the terminating thread reaches the zombie state.
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
216
/**
 *  @brief Removes any set states for @a the_thread.
 *
 *  This routine removes any set states for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  - INTERRUPT LATENCY:
 *    + ready chain
 *    + select heir
 */
void _Thread_Ready(
  Thread_Control *the_thread
);

/**
 *  @brief Clears the indicated STATES for @a the_thread.
 *
 *  This routine clears the indicated STATES for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  - INTERRUPT LATENCY:
 *    + priority map
 *    + select heir
 */
void _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 *  @brief Sets the indicated @a state for @a the_thread.
 *
 *  This routine sets the indicated @a state for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  @param[in] the_thread is the thread to set the state for.
 *  @param[in] state is the state to set the_thread to.
 *
 *  - INTERRUPT LATENCY:
 *   + ready chain
 *   + select map
 */
void _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 *  @brief Sets the transient state for a thread.
 *
 *  This routine sets the Transient state for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  @param[in] the_thread is the thread to perform the action upon.
 *
 *  - INTERRUPT LATENCY:
 *    + single case
 */
void _Thread_Set_transient(
  Thread_Control *the_thread
);
282
/**
 *  @brief Initializes the environment for a thread.
 *
 *  This routine initializes the context of @a the_thread to its
 *  appropriate starting state.
 *
 *  @param[in] the_thread is the pointer to the thread control block.
 */
void _Thread_Load_environment(
  Thread_Control *the_thread
);

/**
 *  @brief Wrapper function for all threads.
 *
 *  This routine is the wrapper function for all threads.  It is
 *  the starting point for all threads.  The user provided thread
 *  entry point is invoked by this routine.  Operations
 *  which must be performed immediately before and after the user's
 *  thread executes are found here.
 *
 *  @note On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 */
void _Thread_Handler( void );

/**
 *  @brief Ends the delay of a thread.
 *
 *  This routine is invoked when a thread must be unblocked at the
 *  end of a time based delay (i.e. wake after or wake when).
 *  It is called by the watchdog handler.
 *
 *  @param[in] id is the thread id
 *  @param[in] ignored is an unused watchdog callout argument
 */
void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
);
325
/**
 *  @brief Change the priority of a thread.
 *
 *  This routine changes the current priority of @a the_thread to
 *  @a new_priority.  It performs any necessary scheduling operations
 *  including the selection of a new heir thread.
 *
 *  @param[in] the_thread is the thread to change
 *  @param[in] new_priority is the priority to set @a the_thread to
 *  @param[in] prepend_it is a switch to prepend the thread
 */
void _Thread_Change_priority (
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
);

/**
 *  @brief Set thread priority.
 *
 *  This routine updates the priority related fields in the_thread
 *  control block to indicate the current priority is now new_priority.
 */
void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
);

/**
 *  This macro suspends @a _the_thread by setting the STATES_SUSPENDED
 *  state.  There is no nested suspend count.
 */
#define _Thread_Suspend( _the_thread ) \
        _Thread_Set_state( _the_thread, STATES_SUSPENDED )

/**
 *  This macro resumes @a _the_thread by clearing the STATES_SUSPENDED
 *  state.  There is no force parameter or suspend count.
 */
#define _Thread_Resume( _the_thread ) \
        _Thread_Clear_state( _the_thread, STATES_SUSPENDED )
368
/**
 *  @brief Maps thread Id to a TCB pointer.
 *
 *  This function maps thread IDs to thread control
 *  blocks.  If ID corresponds to a local thread, then it
 *  returns the_thread control pointer which maps to ID
 *  and @a location is set to OBJECTS_LOCAL.  If the thread ID is
 *  global and resides on a remote node, then location is set
 *  to OBJECTS_REMOTE, and the_thread is undefined.
 *  Otherwise, location is set to OBJECTS_ERROR and
 *  the_thread is undefined.
 *
 *  @param[in] id is the id of the thread.
 *  @param[out] location is set to the location of the object.
 *
 *  @note  The performance of many RTEMS services depends upon
 *         the quick execution of the "good object" path in this
 *         routine.  If there is a possibility of saving a few
 *         cycles off the execution time, this routine is worth
 *         further optimization attention.
 */
Thread_Control *_Thread_Get (
  Objects_Id         id,
  Objects_Locations *location
);

/**
 *  @brief Cancel a blocking operation due to ISR.
 *
 *  This method is used to cancel a blocking operation that was
 *  satisfied from an ISR while the thread executing was in the
 *  process of blocking.
 *
 *  This method will restore the previous ISR disable level during the cancel
 *  operation.  Thus it is an implicit _ISR_Enable().
 *
 *  @param[in] sync_state is the synchronization state
 *  @param[in] the_thread is the thread whose blocking is canceled
 *  @param[in] level is the previous ISR disable level
 *
 *  @note This is a rare routine in RTEMS.  It is called with
 *        interrupts disabled and only when an ISR completed
 *        a blocking condition in process.
 */
void _Thread_blocking_operation_Cancel(
  Thread_blocking_operation_States  sync_state,
  Thread_Control                   *the_thread,
  ISR_Level                         level
);
418
/**
 * @brief Returns the processor assigned to the thread.
 *
 * On SMP configurations this is the processor stored in the thread control
 * block; otherwise it is always the current processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

/**
 * @brief Assigns the processor to the thread.
 *
 * On uni-processor configurations this is a no-op.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
444
445/**
446 * This function returns true if the_thread is the currently executing
447 * thread, and false otherwise.
448 */
449
450RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
451  const Thread_Control *the_thread
452)
453{
454  return ( the_thread == _Thread_Executing );
455}
456
#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif
472
473/**
474 * This function returns true if the_thread is the heir
475 * thread, and false otherwise.
476 */
477
478RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
479  const Thread_Control *the_thread
480)
481{
482  return ( the_thread == _Thread_Heir );
483}
484
485/**
486 * This routine clears any blocking state for the_thread.  It performs
487 * any necessary scheduling operations including the selection of
488 * a new heir thread.
489 */
490
491RTEMS_INLINE_ROUTINE void _Thread_Unblock (
492  Thread_Control *the_thread
493)
494{
495  _Thread_Clear_state( the_thread, STATES_BLOCKED );
496}
497
/**
 * @brief Resets the current context of the calling thread to that of its
 * initial state.
 *
 * NOTE(review): presumably this routine does not return normally since the
 * context is restarted -- confirm against _CPU_Context_Restart_self().
 */
RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  /* Drop the Giant lock; interrupts remain disabled for the restart. */
  _Giant_Release();

  _ISR_Disable_without_giant( level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  /* Reload the floating point context if this thread owns one. */
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif

  _CPU_Context_Restart_self( &executing->Registers );
}
521
/**
 * This function returns true if the floating point context of
 * the_thread is currently loaded in the floating point unit, and
 * false otherwise.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif

/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
548
/**
 * This function returns true if a thread dispatch is necessary, and false
 * otherwise.  Despite the historic naming, it reports the thread dispatch
 * necessary indicator, not whether dispatching is disabled.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
558
559/**
560 * This function returns true if the_thread is NULL and false otherwise.
561 */
562
563RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
564  const Thread_Control *the_thread
565)
566{
567  return ( the_thread == NULL );
568}
569
/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the status code indicates that a proxy is
 * blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}
581
582RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
583{
584  /* Idle threads */
585  uint32_t maximum_internal_threads =
586    rtems_configuration_get_maximum_processors();
587
588  /* MPCI thread */
589#if defined(RTEMS_MULTIPROCESSING)
590  if ( _System_state_Is_multiprocessing ) {
591    ++maximum_internal_threads;
592  }
593#endif
594
595  return maximum_internal_threads;
596}
597
/**
 * @brief Allocates an internal thread object without object-layer protection.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information );
}
603
/**
 * @brief Requests a thread dispatch on the processor executing the thread.
 *
 * On SMP configurations, if the thread currently executes on some processor,
 * the dispatch necessary indicator of that processor is set, and an
 * inter-processor interrupt is sent if it is not the current processor.
 * On uni-processor configurations this is a no-op.
 */
RTEMS_INLINE_ROUTINE void _Thread_Request_dispatch_if_executing(
  Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  if ( _Thread_Is_executing_on_a_processor( thread ) ) {
    const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
    Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );

    cpu_of_thread->dispatch_necessary = true;

    if ( cpu_of_executing != cpu_of_thread ) {
      _Per_CPU_Send_interrupt( cpu_of_thread );
    }
  }
#else
  (void) thread;
#endif
}
623
/**
 * @brief Issues a signal notification for the thread.
 *
 * If called from an ISR while the thread executes on the current processor,
 * the thread dispatch necessary indicator is set.  Otherwise, on SMP
 * configurations, if the thread executes on another processor, that
 * processor's dispatch necessary indicator is set and an inter-processor
 * interrupt is sent.
 */
RTEMS_INLINE_ROUTINE void _Thread_Signal_notification( Thread_Control *thread )
{
  if ( _ISR_Is_in_progress() && _Thread_Is_executing( thread ) ) {
    _Thread_Dispatch_necessary = true;
  } else {
#if defined(RTEMS_SMP)
    if ( _Thread_Is_executing_on_a_processor( thread ) ) {
      const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
      Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );

      if ( cpu_of_executing != cpu_of_thread ) {
        cpu_of_thread->dispatch_necessary = true;
        _Per_CPU_Send_interrupt( cpu_of_thread );
      }
    }
#endif
  }
}
642
/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * The thread dispatch necessary indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Scheduler_SMP_Update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  cpu_self->dispatch_necessary = false;

#if defined( RTEMS_SMP )
  /*
   * It is critical that we first update the dispatch necessary indicator
   * and then read the heir so that we don't miss an update by
   * _Scheduler_SMP_Update_heir().
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
#endif

  heir = cpu_self->heir;
  cpu_self->executing = heir;

  return heir;
}
675
676RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
677  Thread_Control *executing,
678  Timestamp_Control *time_of_last_context_switch
679)
680{
681  Timestamp_Control uptime;
682  Timestamp_Control ran;
683
684  _TOD_Get_uptime( &uptime );
685  _Timestamp_Subtract(
686    time_of_last_context_switch,
687    &uptime,
688    &ran
689  );
690  *time_of_last_context_switch = uptime;
691  _Timestamp_Add_to( &executing->cpu_time_used, &ran );
692}
693
/**
 * @brief Initializes the thread action control to an empty action chain.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}
700
701RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
702  Thread_Action         *action,
703  Thread_Action_handler  handler
704)
705{
706  action->handler = handler;
707  _Chain_Set_off_chain( &action->Node );
708}
709
/**
 * @brief Disables interrupts and acquires the per-CPU lock of the current
 * processor.
 *
 * @param[out] level receives the previous interrupt level.
 *
 * @return The current processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *
  _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level )
{
  Per_CPU_Control *cpu;

  /* Interrupts are disabled before the processor is looked up, presumably
   * so the executing thread cannot move in between -- confirm. */
  _ISR_Disable_without_giant( *level );
  cpu = _Per_CPU_Get();
  _Per_CPU_Acquire( cpu );

  return cpu;
}

/**
 * @brief Disables interrupts and acquires the per-CPU lock of the processor
 * assigned to the thread.
 *
 * @param[in] thread is the thread of interest.
 * @param[out] level receives the previous interrupt level.
 *
 * @return The processor assigned to the thread.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire(
  Thread_Control *thread,
  ISR_Level      *level
)
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Thread_Get_CPU( thread );
  _Per_CPU_Acquire( cpu );

  return cpu;
}

/**
 * @brief Releases the per-CPU lock and restores the interrupt level.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
  Per_CPU_Control *cpu,
  ISR_Level level
)
{
  _Per_CPU_Release_and_ISR_enable( cpu, level );
}
743
/**
 * @brief Adds the post switch action to the thread, if it is not already
 * on an action chain.
 *
 * The per-CPU lock of the processor assigned to the thread is held while
 * the action is appended.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control *thread,
  Thread_Action  *action
)
{
  Per_CPU_Control *cpu;
  ISR_Level        level;

  cpu = _Thread_Action_ISR_disable_and_acquire( thread, &level );
  _Chain_Append_if_is_off_chain_unprotected(
    &thread->Post_switch_actions.Chain,
    &action->Node
  );
  _Thread_Action_release_and_ISR_enable( cpu, level );
}
759
/**
 * @brief Returns true if the life state has the restarting flag set, and
 * false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

/**
 * @brief Returns true if the life state has the terminating flag set, and
 * false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

/**
 * @brief Returns true if the life state has the protected flag set, and
 * false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_PROTECTED ) != 0;
}

/**
 * @brief Returns true if the life state has a restarting or terminating
 * flag set, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING_TERMINTING ) != 0;
}
787
/**
 * @brief Records the processor on which the thread really executes, for
 * debugging purposes.
 *
 * This is a no-op unless both RTEMS_SMP and RTEMS_DEBUG are defined.
 */
RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
  the_thread->debug_real_cpu = cpu;
#else
  (void) the_thread;
  (void) cpu;
#endif
}
800
#if !defined(__DYNAMIC_REENT__)
/**
 * This routine returns the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE struct _reent **_Thread_Get_libc_reent( void )
{
  return _Thread_libc_reent;
}

/**
 * This routine sets the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE void _Thread_Set_libc_reent (
  struct _reent **libc_reent
)
{
  _Thread_libc_reent = libc_reent;
}
#endif
822
823/** @}*/
824
825#ifdef __cplusplus
826}
827#endif
828
829#if defined(RTEMS_MULTIPROCESSING)
830#include <rtems/score/threadmp.h>
831#endif
832
833#endif
834/* end of include file */
Note: See TracBrowser for help on using the repository browser.