source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ dafa5d88

Last change on this file since dafa5d88 was dafa5d88, checked in by Sebastian Huber <sebastian.huber@…>, on 09/03/15 at 08:27:16

score: Implement priority boosting

/**
 * @file
 *
 * @brief Inlined Routines from the Thread Handler
 *
 * This file contains the macro implementation of the inlined
 * routines from the Thread handler.
 */

/*
 *  COPYRIGHT (c) 1989-2008.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2014 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_THREADIMPL_H
#define _RTEMS_SCORE_THREADIMPL_H

#include <rtems/score/thread.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/interr.h>
#include <rtems/score/isr.h>
#include <rtems/score/objectimpl.h>
#include <rtems/score/resourceimpl.h>
#include <rtems/score/statesimpl.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/freechain.h>
#include <rtems/config.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreThread
 */
/**@{**/

/**
 *  The following status value indicates that a proxy is blocking on
 *  behalf of a thread which is waiting for a resource.
 */
#define THREAD_STATUS_PROXY_BLOCKING 0x1111111

/**
 *  Self for the GNU Ada Run-Time
 */
SCORE_EXTERN void *rtems_ada_self;

typedef struct {
  Objects_Information Objects;

  Freechain_Control Free_thread_queue_heads;
} Thread_Information;

/**
 *  The following defines the information control block used to
 *  manage this class of objects.
 */
SCORE_EXTERN Thread_Information _Thread_Internal_information;

/**
 *  The following points to the thread whose floating point
 *  context is currently loaded.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
SCORE_EXTERN Thread_Control *_Thread_Allocated_fp;
#endif

#if !defined(__DYNAMIC_REENT__)
/**
 * The C library re-entrant global pointer.  Some C library implementations
 * such as newlib have a single global pointer that is changed during a
 * context switch.  This pointer points to that global pointer.  The thread
 * control block holds a pointer to the task specific data.
 */
SCORE_EXTERN struct _reent **_Thread_libc_reent;
#endif

#define THREAD_CHAIN_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.Chain )

#define THREAD_RBTREE_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.RBTree )

#if defined(RTEMS_SMP)
#define THREAD_RESOURCE_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node )
#endif
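
/*
 * Illustrative sketch (not part of the original header): the container-of
 * macros recover the thread from one of its embedded wait nodes, e.g. when
 * iterating a thread queue chain.  The chain variable is hypothetical.
 *
 * @code
 * Chain_Node     *node = _Chain_First( &some_wait_chain );
 * Thread_Control *the_thread = THREAD_CHAIN_NODE_TO_THREAD( node );
 * @endcode
 */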

void _Thread_Initialize_information(
  Thread_Information  *information,
  Objects_APIs         the_api,
  uint16_t             the_class,
  uint32_t             maximum,
  bool                 is_string,
  uint32_t             maximum_name_length
#if defined(RTEMS_MULTIPROCESSING)
  ,
  bool                 supports_global
#endif
);

/**
 *  @brief Initialize thread handler.
 *
 *  This routine performs the initialization necessary for this handler.
 */
void _Thread_Handler_initialization(void);

/**
 *  @brief Create idle thread.
 *
 *  This routine creates the idle thread.
 *
 *  @warning No thread should be created before this one.
 */
void _Thread_Create_idle(void);

/**
 *  @brief Start thread multitasking.
 *
 *  This routine initiates multitasking.  It is invoked only as
 *  part of initialization and its invocation is the last act of
 *  the non-multitasking part of the system initialization.
 */
void _Thread_Start_multitasking( void ) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/**
 *  @brief Allocate the requested stack space for the thread.
 *
 *  Allocate the requested stack space for the thread and set the
 *  Start.stack field to the address of the stack.
 *
 *  @param[in] the_thread is the thread for which the stack space is requested
 *  @param[in] stack_size is the size of the requested stack space
 *
 *  @retval the actual size allocated after any adjustment
 *  @retval zero if the allocation failed
 */
size_t _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  size_t          stack_size
);

/**
 *  @brief Deallocate thread stack.
 *
 *  Deallocate the Thread's stack.
 */
void _Thread_Stack_Free(
  Thread_Control *the_thread
);

/**
 *  @brief Initialize thread.
 *
 *  This routine initializes the specified thread.  It allocates
 *  all memory associated with this thread.  It completes by adding
 *  the thread to the local object table so operations on this
 *  thread id are allowed.
 *
 *  @note If stack_area is NULL, it is allocated from the workspace.
 *
 *  @note If the stack is allocated from the workspace, then it is
 *        guaranteed to be of at least minimum size.
 */
bool _Thread_Initialize(
  Thread_Information                   *information,
  Thread_Control                       *the_thread,
  const struct Scheduler_Control       *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
);

/**
 *  @brief Initializes thread and executes it.
 *
 *  This routine initializes the executable information for a thread
 *  and makes it ready to execute.  After this routine executes, the
 *  thread competes with all other threads for CPU time.
 *
 *  @param the_thread is the thread to be initialized
 *  @param the_prototype
 *  @param entry_point
 *  @param pointer_argument
 *  @param numeric_argument
 *  @param[in,out] cpu The processor used to start an idle thread
 *  during system initialization.  Must be set to @c NULL to start a normal
 *  thread.
 */
bool _Thread_Start(
  Thread_Control            *the_thread,
  Thread_Start_types         the_prototype,
  void                      *entry_point,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument,
  Per_CPU_Control           *cpu
);

bool _Thread_Restart(
  Thread_Control            *the_thread,
  Thread_Control            *executing,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument
);

void _Thread_Yield( Thread_Control *executing );

bool _Thread_Set_life_protection( bool protect );

void _Thread_Life_action_handler(
  Thread_Control  *executing,
  Thread_Action   *action,
  Per_CPU_Control *cpu,
  ISR_Level        level
);

/**
 * @brief Kills all zombie threads in the system.
 *
 * Threads change into the zombie state as the last step in the thread
 * termination sequence right before a context switch to the heir thread is
 * initiated.  Since the thread stack is still in use during this phase we have
 * to postpone the thread stack reclamation until this point.  On SMP
 * configurations we may have to busy wait for context switch completion here.
 */
void _Thread_Kill_zombies( void );

/**
 * @brief Closes the thread.
 *
 * Closes the thread object and starts the thread termination sequence.  In
 * case the executing thread is not terminated, then this function waits until
 * the terminating thread reaches the zombie state.
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );

/**
 * @brief Clears the specified thread state.
 *
 * In case the previous state is a non-ready state and the next state is the
 * ready state, then the thread is unblocked by the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] state The state to clear.  It must not be zero.
 *
 * @return The previous state.
 */
States_Control _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 * @brief Sets the specified thread state.
 *
 * In case the previous state is the ready state, then the thread is blocked by
 * the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] state The state to set.  It must not be zero.
 *
 * @return The previous state.
 */
States_Control _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 * @brief Clears all thread states.
 *
 * In case the previous state is a non-ready state, then the thread is
 * unblocked by the scheduler.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Ready(
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_ALL_SET );
}
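
/*
 * Illustrative sketch (not part of the original header): a simple block and
 * later unblock of a thread using the state routines.  The state used here
 * is only an example.
 *
 * @code
 * _Thread_Set_state( executing, STATES_WAITING_FOR_EVENT );
 *
 * ... at some later time, from another context ...
 *
 * _Thread_Clear_state( the_thread, STATES_WAITING_FOR_EVENT );
 * @endcode
 */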

/**
 *  @brief Initializes environment for a thread.
 *
 *  This routine initializes the context of @a the_thread to its
 *  appropriate starting state.
 *
 *  @param[in] the_thread is the pointer to the thread control block.
 */
void _Thread_Load_environment(
  Thread_Control *the_thread
);

/**
 *  @brief Wrapper function for all threads.
 *
 *  This routine is the wrapper function for all threads.  It is
 *  the starting point for all threads.  The user provided thread
 *  entry point is invoked by this routine.  Operations
 *  which must be performed immediately before and after the user's
 *  thread executes are found here.
 *
 *  @note On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 */
void _Thread_Handler( void );

/**
 * @brief Executes the global constructors and then restarts itself as the
 * first initialization thread.
 *
 * The first initialization thread is the first RTEMS initialization task or
 * the first POSIX initialization thread in case no RTEMS initialization tasks
 * are present.
 */
void *_Thread_Global_construction( void );

/**
 *  @brief Ends the delay of a thread.
 *
 *  This routine is invoked when a thread must be unblocked at the
 *  end of a time based delay (i.e. wake after or wake when).
 *  It is called by the watchdog handler.
 *
 *  @param[in] id is the thread id
 *  @param[in] ignored is not used
 */
void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
);

/**
 * @brief Returns true if the left thread priority is less than the right
 * thread priority in the intuitive sense of priority and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
  Priority_Control left,
  Priority_Control right
)
{
  return left > right;
}

/**
 * @brief Returns the highest priority of the left and right thread priorities
 * in the intuitive sense of priority.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
  Priority_Control left,
  Priority_Control right
)
{
  return _Thread_Priority_less_than( left, right ) ? right : left;
}
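
/*
 * Illustrative sketch (not part of the original header): a lower numeric
 * value is a higher priority in the intuitive sense, so the comparison is
 * inverted with respect to the numeric values.
 *
 * @code
 * bool             less = _Thread_Priority_less_than( 2, 1 );  // true
 * Priority_Control high = _Thread_Priority_highest( 1, 2 );    // 1
 * @endcode
 */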

/**
 * @brief Filters a thread priority change.
 *
 * Called by _Thread_Change_priority() under the protection of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in, out] new_priority The new priority of the thread.  The filter may
 * alter this value.
 * @param[in] arg The argument passed to _Thread_Change_priority().
 *
 * @retval true Change the current priority.
 * @retval false Otherwise.
 */
typedef bool ( *Thread_Change_priority_filter )(
  Thread_Control   *the_thread,
  Priority_Control *new_priority,
  void             *arg
);

/**
 * @brief Changes the priority of a thread if allowed by the filter function.
 *
 * It changes the current priority of the thread to the new priority in case
 * the filter function returns true.  In this case the scheduler is notified
 * of the priority change as well.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 * @param[in] arg The argument for the filter function.
 * @param[in] filter The filter function to determine if a priority change is
 * allowed and optionally perform other actions under the protection of the
 * thread lock simultaneously with the update of the current priority.
 * @param[in] prepend_it In case this is true, then the thread is prepended to
 * its priority group in its scheduler instance, otherwise it is appended.
 */
void _Thread_Change_priority(
  Thread_Control                *the_thread,
  Priority_Control               new_priority,
  void                          *arg,
  Thread_Change_priority_filter  filter,
  bool                           prepend_it
);
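
/*
 * Illustrative sketch (not part of the original header): a filter which
 * accepts the change only if it raises the current priority, similar in
 * spirit to what _Thread_Raise_priority() needs.  The function and variable
 * names are hypothetical.
 *
 * @code
 * static bool _Example_raise_filter(
 *   Thread_Control   *the_thread,
 *   Priority_Control *new_priority,
 *   void             *arg
 * )
 * {
 *   (void) arg;
 *   return _Thread_Priority_less_than(
 *     the_thread->current_priority,
 *     *new_priority
 *   );
 * }
 *
 * _Thread_Change_priority( the_thread, prio, NULL, _Example_raise_filter, false );
 * @endcode
 */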

/**
 * @brief Raises the priority of a thread.
 *
 * It changes the current priority of the thread to the new priority if the new
 * priority is higher than the current priority.  In this case the thread is
 * appended to its new priority group in its scheduler instance.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 *
 * @see _Thread_Change_priority().
 */
void _Thread_Raise_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
);

/**
 * @brief Inherit the priority of a thread.
 *
 * It changes the current priority of the inheritor thread to the current priority
 * of the ancestor thread if it is higher than the current priority of the inheritor
 * thread.  In this case the inheritor thread is appended to its new priority group
 * in its scheduler instance.
 *
 * On SMP configurations, the priority is changed to PRIORITY_PSEUDO_ISR in
 * case the own schedulers of the inheritor and ancestor thread differ (priority
 * boosting).
 *
 * @param[in] inheritor The thread to inherit the priority.
 * @param[in] ancestor The thread to bequeath its priority to the inheritor
 *   thread.
 */
#if defined(RTEMS_SMP)
void _Thread_Inherit_priority(
  Thread_Control *inheritor,
  Thread_Control *ancestor
);
#else
RTEMS_INLINE_ROUTINE void _Thread_Inherit_priority(
  Thread_Control *inheritor,
  Thread_Control *ancestor
)
{
  _Thread_Raise_priority( inheritor, ancestor->current_priority );
}
#endif

/**
 * @brief Sets the current priority to the real priority of a thread.
 *
 * Sets the priority restore hint to false.
 */
void _Thread_Restore_priority( Thread_Control *the_thread );

/**
 * @brief Sets the priority of a thread.
 *
 * It sets the real priority of the thread.  In addition it changes the current
 * priority of the thread if the new priority is higher than the current
 * priority or the thread owns no resources.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 * @param[out] old_priority The old real priority of the thread.  This pointer
 * must not be @c NULL.
 * @param[in] prepend_it In case this is true, then the thread is prepended to
 * its priority group in its scheduler instance, otherwise it is appended.
 *
 * @see _Thread_Change_priority().
 */
void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  Priority_Control *old_priority,
  bool              prepend_it
);

/**
 *  @brief Maps thread Id to a TCB pointer.
 *
 *  This function maps thread IDs to thread control
 *  blocks.  If ID corresponds to a local thread, then it
 *  returns the_thread control pointer which maps to ID
 *  and @a location is set to OBJECTS_LOCAL.  If the thread ID is
 *  global and resides on a remote node, then location is set
 *  to OBJECTS_REMOTE, and the_thread is undefined.
 *  Otherwise, location is set to OBJECTS_ERROR and
 *  the_thread is undefined.
 *
 *  @param[in] id is the id of the thread.
 *  @param[out] location is set to the location of the thread.
 *
 *  @note  The performance of many RTEMS services depends upon
 *         the quick execution of the "good object" path in this
 *         routine.  If there is a possibility of saving a few
 *         cycles off the execution time, this routine is worth
 *         further optimization attention.
 */
Thread_Control *_Thread_Get (
  Objects_Id         id,
  Objects_Locations *location
);
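
/*
 * Illustrative usage sketch (not part of the original header): on
 * OBJECTS_LOCAL the thread dispatch is disabled and must be re-enabled,
 * here via _Objects_Put().  The helper names are hypothetical.
 *
 * @code
 * Objects_Locations  location;
 * Thread_Control    *the_thread = _Thread_Get( id, &location );
 *
 * switch ( location ) {
 *   case OBJECTS_LOCAL:
 *     do_something_with( the_thread );
 *     _Objects_Put( &the_thread->Object );
 *     break;
 *   default:
 *     handle_invalid_id();
 *     break;
 * }
 * @endcode
 */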

/**
 * @brief Gets a thread by its identifier.
 *
 * @see _Objects_Get_isr_disable().
 */
Thread_Control *_Thread_Get_interrupt_disable(
  Objects_Id         id,
  Objects_Locations *location,
  ISR_lock_Context  *lock_context
);

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}

/**
 * This function returns true if the_thread is the currently executing
 * thread, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Executing );
}

#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif

/**
 * @brief Returns @a true and sets time_of_context_switch to the
 * time of the last context switch when the thread is currently executing
 * in the system, otherwise returns @a false.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Get_time_of_last_context_switch(
  Thread_Control    *the_thread,
  Timestamp_Control *time_of_context_switch
)
{
  bool retval = false;

  _Thread_Disable_dispatch();
  #ifndef RTEMS_SMP
    if ( _Thread_Executing->Object.id == the_thread->Object.id ) {
      *time_of_context_switch = _Thread_Time_of_last_context_switch;
      retval = true;
    }
  #else
    if ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
      *time_of_context_switch =
        _Thread_Get_CPU( the_thread )->time_of_last_context_switch;
      retval = true;
    }
  #endif
  _Thread_Enable_dispatch();
  return retval;
}

/**
 * This function returns true if the_thread is the heir
 * thread, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Heir );
}

/**
 * This routine clears any blocking state for the_thread.  It performs
 * any necessary scheduling operations including the selection of
 * a new heir thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}

/**
 * This routine resets the current context of the calling thread
 * to that of its initial state.
 */
RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  _Giant_Release( _Per_CPU_Get() );

  _ISR_Disable_without_giant( level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif

  _CPU_Context_Restart_self( &executing->Registers );
}

/**
 * This function returns true if the floating point context of
 * the_thread is currently loaded in the floating point unit, and
 * false otherwise.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif

/*
 *  If the CPU has hardware floating point, then we must address saving
 *  and restoring it as part of the context switch.
 *
 *  The second conditional compilation section selects the algorithm used
 *  to context switch between floating point tasks.  The deferred algorithm
 *  can be significantly better in a system with few floating point tasks
 *  because it reduces the total number of save and restore FP context
 *  operations.  However, this algorithm can not be used on all CPUs due
 *  to unpredictable use of FP registers by some compilers for integer
 *  operations.
 */

RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}

RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}

/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif

/**
 * This function returns true if a context switch is necessary, and false
 * otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}

/**
 * This function returns true if the_thread is NULL and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
  const Thread_Control *the_thread
)
{
  return ( the_thread == NULL );
}

/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the status code indicates that a proxy is
 * blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}

RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
{
  /* Idle threads */
  uint32_t maximum_internal_threads =
    rtems_configuration_get_maximum_processors();

  /* MPCI thread */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    ++maximum_internal_threads;
  }
#endif

  return maximum_internal_threads;
}

RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}

/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * The thread dispatch necessary indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Thread_Dispatch_update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  cpu_self->dispatch_necessary = false;

#if defined( RTEMS_SMP )
  /*
   * It is critical that we first update the dispatch necessary indicator and
   * then read the heir so that we don't miss an update by
   * _Thread_Dispatch_update_heir().
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
#endif

  heir = cpu_self->heir;
  cpu_self->executing = heir;

  return heir;
}

#if defined( RTEMS_SMP )
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and then the dispatch
   * necessary so that _Thread_Get_heir_and_make_it_executing() cannot miss an
   * update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}
#endif

RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
  Thread_Control *executing,
  Timestamp_Control *time_of_last_context_switch
)
{
  Timestamp_Control uptime;
  Timestamp_Control ran;

  _TOD_Get_uptime( &uptime );
  _Timestamp_Subtract(
    time_of_last_context_switch,
    &uptime,
    &ran
  );
  *time_of_last_context_switch = uptime;
  _Timestamp_Add_to( &executing->cpu_time_used, &ran );
}

RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}

RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  action->handler = handler;
  _Chain_Set_off_chain( &action->Node );
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *
  _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level )
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Per_CPU_Get();
  _Per_CPU_Acquire( cpu );

  return cpu;
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire(
  Thread_Control *thread,
  ISR_Level      *level
)
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Thread_Get_CPU( thread );
  _Per_CPU_Acquire( cpu );

  return cpu;
}

RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
  Per_CPU_Control *cpu,
  ISR_Level level
)
{
  _Per_CPU_Release_and_ISR_enable( cpu, level );
}

RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control *thread,
  Thread_Action  *action
)
{
  Per_CPU_Control *cpu_of_thread;
  ISR_Level        level;

  cpu_of_thread = _Thread_Action_ISR_disable_and_acquire( thread, &level );
  cpu_of_thread->dispatch_necessary = true;

#if defined(RTEMS_SMP)
  if ( _Per_CPU_Get() != cpu_of_thread ) {
    _Per_CPU_Send_interrupt( cpu_of_thread );
  }
#endif

  _Chain_Append_if_is_off_chain_unprotected(
    &thread->Post_switch_actions.Chain,
    &action->Node
  );

  _Thread_Action_release_and_ISR_enable( cpu_of_thread, level );
}
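
/*
 * Illustrative sketch (not part of the original header): registering a
 * post-switch action.  The handler name is hypothetical; see
 * _Thread_Life_action_handler() for a real user of this mechanism.
 *
 * @code
 * static void _Example_action_handler(
 *   Thread_Control  *executing,
 *   Thread_Action   *action,
 *   Per_CPU_Control *cpu,
 *   ISR_Level        level
 * );
 *
 * Thread_Action action;
 *
 * _Thread_Action_initialize( &action, _Example_action_handler );
 * _Thread_Add_post_switch_action( the_thread, &action );
 * @endcode
 */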

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_PROTECTED ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING_TERMINATING ) != 0;
}

/**
 * @brief Returns true if the thread owns resources, and false otherwise.
 *
 * Resources are accounted with the Thread_Control::resource_count resource
 * counter.  This counter is used by semaphore objects for example.
 *
 * In addition to the resource counter there is a resource dependency tree
 * available on SMP configurations.  In case this tree is non-empty, then the
 * thread owns resources.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
  const Thread_Control *the_thread
)
{
  bool owns_resources = the_thread->resource_count != 0;

#if defined(RTEMS_SMP)
  owns_resources = owns_resources
    || _Resource_Node_owns_resources( &the_thread->Resource_node );
#endif

  return owns_resources;
}

/**
 * @brief Acquires the default thread lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Assert( _ISR_Get_level() != 0 );
#if defined(RTEMS_SMP)
  _SMP_ticket_lock_Acquire(
    &the_thread->Lock.Default,
    &_Thread_Executing->Lock.Stats,
    &lock_context->Lock_context.Stats_context
  );
#else
  (void) the_thread;
  (void) lock_context;
#endif
}

/**
 * @brief Acquires the default thread lock and returns the executing thread.
 *
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @return The executing thread.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Lock_acquire_default_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_Lock_acquire_default_critical( executing, lock_context );

  return executing;
}

/**
 * @brief Acquires the default thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_Lock_acquire_default_critical( the_thread, lock_context );
}

/**
 * @brief Releases the thread lock inside a critical section (interrupts
 * disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] lock The lock.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_critical(
  void             *lock,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  _SMP_ticket_lock_Release(
    lock,
    &lock_context->Lock_context.Stats_context
  );
#else
  (void) lock;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the thread lock.
 *
 * @param[in] lock The lock returned by _Thread_Lock_acquire().
 * @param[in] lock_context The lock context used for _Thread_Lock_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release(
  void             *lock,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_critical( lock, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}

/**
 * @brief Releases the default thread lock inside a critical section
 * (interrupts disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_critical(
#if defined(RTEMS_SMP)
    &the_thread->Lock.Default,
#else
    NULL,
#endif
    lock_context
  );
}

/**
 * @brief Releases the default thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_default_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}

/**
 * @brief Acquires the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for _Thread_Lock_release().
 *
 * @return The lock required by _Thread_Lock_release().
 */
RTEMS_INLINE_ROUTINE void *_Thread_Lock_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  SMP_ticket_lock_Control *lock;

  while ( true ) {
    uint32_t my_generation;

    _ISR_lock_ISR_disable( lock_context );
    my_generation = the_thread->Lock.generation;

    /*
     * Ensure that we read the initial lock generation before we obtain our
     * current lock.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQUIRE );

    lock = the_thread->Lock.current;
    _SMP_ticket_lock_Acquire(
      lock,
      &_Thread_Executing->Lock.Stats,
      &lock_context->Lock_context.Stats_context
    );

    /*
     * Ensure that we read the second lock generation after we obtained our
     * current lock.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQUIRE );

    if ( the_thread->Lock.generation == my_generation ) {
      break;
    }

    _Thread_Lock_release( lock, lock_context );
  }

  return lock;
#else
  _ISR_Disable( lock_context->isr_level );

  return NULL;
#endif
}
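
/*
 * Illustrative sketch (not part of the original header): the acquire and
 * release must be paired with the same lock pointer and lock context, since
 * the current lock may change between acquisitions.  The critical section
 * function is hypothetical.
 *
 * @code
 * ISR_lock_Context  lock_context;
 * void             *lock = _Thread_Lock_acquire( the_thread, &lock_context );
 *
 * examine_or_modify( the_thread );  // hypothetical critical section
 *
 * _Thread_Lock_release( lock, &lock_context );
 * @endcode
 */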

#if defined(RTEMS_SMP)
/*
 * Internal function, use _Thread_Lock_set() or _Thread_Lock_restore_default()
 * instead.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected(
  Thread_Control          *the_thread,
  SMP_ticket_lock_Control *new_lock
)
{
  the_thread->Lock.current = new_lock;

  /*
   * Ensure that the new lock is visible before we update the generation
   * number.  Otherwise someone would be able to read an up to date generation
   * number and an old lock.
   */
  _Atomic_Fence( ATOMIC_ORDER_RELEASE );

  /*
   * Since we set a new lock right before, this increment is not protected by a
   * lock and thus must be an atomic operation.
   */
  _Atomic_Fetch_add_uint(
    &the_thread->Lock.generation,
    1,
    ATOMIC_ORDER_RELAXED
  );
}
#endif

/**
 * @brief Sets a new thread lock.
 *
 * The caller must not be the owner of the default thread lock.  The caller
 * must be the owner of the new lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_lock The new thread lock.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_set(
  Thread_Control          *the_thread,
  SMP_ticket_lock_Control *new_lock
)
{
  ISR_lock_Context lock_context;

  _Thread_Lock_acquire_default_critical( the_thread, &lock_context );
  _Assert( the_thread->Lock.current == &the_thread->Lock.Default );
  _Thread_Lock_set_unprotected( the_thread, new_lock );
  _Thread_Lock_release_default_critical( the_thread, &lock_context );
}
#else
#define _Thread_Lock_set( the_thread, new_lock ) \
  do { } while ( 0 )
#endif

/**
 * @brief Restores the default thread lock.
 *
 * The caller must be the owner of the current thread lock.
 *
 * @param[in] the_thread The thread.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_restore_default(
  Thread_Control *the_thread
)
{
  /*
   * Ensures that the stores to the wait queue and operations have completed
   * before the default lock is restored.  See _Thread_Wait_set_queue() and
   * _Thread_Wait_restore_default_operations().
   */
  _Atomic_Fence( ATOMIC_ORDER_RELEASE );

  _Thread_Lock_set_unprotected( the_thread, &the_thread->Lock.Default );
}
#else
#define _Thread_Lock_restore_default( the_thread ) \
  do { } while ( 0 )
#endif
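
/*
 * Illustrative sketch (not part of the original header): a synchronization
 * object may install its own lock while a thread waits on it and restore
 * the default lock once the wait ends.  The queue variable and its Lock
 * member are hypothetical.
 *
 * @code
 * _Thread_Lock_set( the_thread, &the_queue->Lock );
 *
 * ... the wait is carried out under the queue lock ...
 *
 * _Thread_Lock_restore_default( the_thread );
 * @endcode
 */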

/**
 * @brief The initial thread wait flags value set by _Thread_Initialize().
 */
#define THREAD_WAIT_FLAGS_INITIAL 0x0U

/**
 * @brief Mask to get the thread wait state flags.
 */
#define THREAD_WAIT_STATE_MASK 0xffU

/**
 * @brief Indicates that the thread begins with the blocking operation.
 *
 * A blocking operation consists of an optional watchdog initialization and the
 * setting of the appropriate thread blocking state with the corresponding
 * scheduler block operation.
 */
#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U

/**
 * @brief Indicates that the thread completed the blocking operation.
 */
#define THREAD_WAIT_STATE_BLOCKED 0x2U

/**
 * @brief Indicates that a condition to end the thread wait occurred.
 *
 * This could be a timeout, a signal, an event or a resource availability.
 */
#define THREAD_WAIT_STATE_READY_AGAIN 0x4U

/**
 * @brief Mask to get the thread wait class flags.
 */
#define THREAD_WAIT_CLASS_MASK 0xff00U

/**
 * @brief Indicates that the thread waits for an event.
 */
#define THREAD_WAIT_CLASS_EVENT 0x100U

/**
 * @brief Indicates that the thread waits for a system event.
 */
#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U

/**
 * @brief Indicates that the thread waits for an object.
 */
#define THREAD_WAIT_CLASS_OBJECT 0x400U

RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}

RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}

/**
 * @brief Tries to change the thread wait flags inside a critical section
 * (interrupts disabled).
 *
 * In case the wait flags are equal to the expected wait flags, then the wait
 * flags are set to the desired wait flags.
 *
 * @param[in] the_thread The thread.
 * @param[in] expected_flags The expected wait flags.
 * @param[in] desired_flags The desired wait flags.
 *
 * @retval true The wait flags were equal to the expected wait flags.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_critical(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_RELAXED,
    ATOMIC_ORDER_RELAXED
  );
#else
  bool success = the_thread->Wait.flags == expected_flags;

  if ( success ) {
    the_thread->Wait.flags = desired_flags;
  }

  return success;
#endif
}

/**
 * @brief Tries to change the thread wait flags.
 *
 * @see _Thread_Wait_flags_try_change_critical().
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  bool success;
#if !defined(RTEMS_SMP)
  ISR_Level level;

  _ISR_Disable_without_giant( level );
#endif

  success = _Thread_Wait_flags_try_change_critical(
    the_thread,
    expected_flags,
    desired_flags
  );

#if !defined(RTEMS_SMP)
  _ISR_Enable_without_giant( level );
#endif

  return success;
}
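
/*
 * Illustrative sketch (not part of the original header): the typical
 * intend-to-block protocol of a synchronization object.  The waiting thread
 * announces its intent, and a condition satisfied on another context tries
 * to move it directly to ready-again before it actually blocks.
 *
 * @code
 * _Thread_Wait_flags_set(
 *   executing,
 *   THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
 * );
 *
 * ... set up a watchdog and carry out the blocking operation ...
 *
 * success = _Thread_Wait_flags_try_change(
 *   the_thread,
 *   THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
 *   THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN
 * );
 * @endcode
 */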

/**
 * @brief Sets the thread queue.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_queue The new queue.
 *
 * @see _Thread_Lock_set().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_queue(
  Thread_Control     *the_thread,
  Thread_queue_Queue *new_queue
)
{
  the_thread->Wait.queue = new_queue;
}

/**
 * @brief Sets the thread queue operations.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_operations The new queue operations.
 *
 * @see _Thread_Lock_set() and _Thread_Wait_restore_default_operations().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_operations(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *new_operations
)
{
  the_thread->Wait.operations = new_operations;
}

/**
 * @brief Restores the default thread queue operations.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Thread_Wait_set_operations().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default_operations(
  Thread_Control *the_thread
)
{
  the_thread->Wait.operations = &_Thread_queue_Operations_default;
}
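
/*
 * Illustrative sketch (not part of the original header): a blocking
 * operation installs its queue and operations under the thread lock and
 * restores the defaults when the wait ends.  The queue and operations
 * variables are hypothetical.
 *
 * @code
 * _Thread_Wait_set_queue( the_thread, &the_queue->Queue );
 * _Thread_Wait_set_operations( the_thread, the_operations );
 *
 * ... the thread waits ...
 *
 * _Thread_Wait_restore_default_operations( the_thread );
 * _Thread_Wait_set_queue( the_thread, NULL );
 * @endcode
 */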

/**
 * @brief Sets the thread wait timeout code.
 *
 * @param[in] the_thread The thread.
 * @param[in] timeout_code The new thread wait timeout code.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_timeout_code(
  Thread_Control *the_thread,
  uint32_t        timeout_code
)
{
  the_thread->Wait.timeout_code = timeout_code;
}

/**
 * @brief General purpose thread wait timeout.
 *
 * @param[in] id Unused.
 * @param[in] arg The thread.
 */
void _Thread_Timeout( Objects_Id id, void *arg );

RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
  the_thread->Scheduler.debug_real_cpu = cpu;
#else
  (void) the_thread;
  (void) cpu;
#endif
}

#if !defined(__DYNAMIC_REENT__)
/**
 * This routine returns the C library re-entrant pointer.
 */
RTEMS_INLINE_ROUTINE struct _reent **_Thread_Get_libc_reent( void )
{
  return _Thread_libc_reent;
}

/**
 * This routine sets the C library re-entrant pointer.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_libc_reent (
  struct _reent **libc_reent
)
{
  _Thread_libc_reent = libc_reent;
}
#endif

/** @}*/

#ifdef __cplusplus
}
#endif

#if defined(RTEMS_MULTIPROCESSING)
#include <rtems/score/threadmp.h>
#endif

#endif
/* end of include file */