source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 2d36931

4.11
Last change on this file since 2d36931 was 2d36931, checked in by Sebastian Huber <sebastian.huber@…>, on Jun 11, 2014 at 9:03:25 AM

score: Collect scheduler related fields in TCB

Add Thread_Scheduler_control to collect scheduler related fields of the
TCB.

  • Property mode set to 100644
File size: 21.6 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/chainimpl.h>
26#include <rtems/score/interr.h>
27#include <rtems/score/isr.h>
28#include <rtems/score/objectimpl.h>
29#include <rtems/score/resourceimpl.h>
30#include <rtems/score/statesimpl.h>
31#include <rtems/score/sysstate.h>
32#include <rtems/score/todimpl.h>
33#include <rtems/config.h>
34
35#ifdef __cplusplus
36extern "C" {
37#endif
38
39/**
40 * @addtogroup ScoreThread
41 */
42/**@{**/
43
/**
 *  Status code used to indicate that a proxy is blocking while
 *  a thread is waiting for a resource.
 */
#define THREAD_STATUS_PROXY_BLOCKING 0x1111111

/**
 *  Self for the GNU Ada Run-Time
 */
SCORE_EXTERN void *rtems_ada_self;

/**
 *  The following defines the information control block used to
 *  manage this class of objects.
 */
SCORE_EXTERN Objects_Information _Thread_Internal_information;

/**
 *  The following points to the thread whose floating point
 *  context is currently loaded.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
SCORE_EXTERN Thread_Control *_Thread_Allocated_fp;
#endif

#if !defined(__DYNAMIC_REENT__)
/**
 * The C library re-entrant global pointer. Some C library implementations
 * such as newlib have a single global pointer that is changed during a
 * context switch. This variable points to that global pointer. The thread
 * control block holds a pointer to the task specific data.
 */
SCORE_EXTERN struct _reent **_Thread_libc_reent;
#endif
78
/**
 *  @brief Initialize thread handler.
 *
 *  This routine performs the initialization necessary for this handler.
 */
void _Thread_Handler_initialization(void);

/**
 *  @brief Create idle thread.
 *
 *  This routine creates the idle thread.
 *
 *  @warning No thread should be created before this one.
 */
void _Thread_Create_idle(void);

/**
 *  @brief Start thread multitasking.
 *
 *  This routine initiates multitasking.  It is invoked only as
 *  part of initialization and its invocation is the last act of
 *  the non-multitasking part of the system initialization.  It
 *  does not return.
 */
void _Thread_Start_multitasking( void ) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/**
 *  @brief Allocate the requested stack space for the thread.
 *
 *  Allocate the requested stack space for the thread.
 *  Set the Start.stack field to the address of the stack.
 *
 *  @param[in] the_thread is the thread where the stack space is requested
 *  @param[in] stack_size is the requested stack size in bytes
 *
 *  @retval actual size allocated after any adjustment
 *  @retval zero if the allocation failed
 */
size_t _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  size_t          stack_size
);

/**
 *  @brief Deallocate thread stack.
 *
 *  Deallocate the Thread's stack.
 */
void _Thread_Stack_Free(
  Thread_Control *the_thread
);

/**
 *  @brief Initialize thread.
 *
 *  This routine initializes the specified the thread.  It allocates
 *  all memory associated with this thread.  It completes by adding
 *  the thread to the local object table so operations on this
 *  thread id are allowed.
 *
 *  @note If stack_area is NULL, it is allocated from the workspace.
 *
 *  @note If the stack is allocated from the workspace, then it is
 *        guaranteed to be of at least minimum size.
 *
 *  @retval true if the thread was initialized, false otherwise.
 */
bool _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  const struct Scheduler_Control       *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
);
156
/**
 *  @brief Initializes thread and executes it.
 *
 *  This routine initializes the executable information for a thread
 *  and makes it ready to execute.  After this routine executes, the
 *  thread competes with all other threads for CPU time.
 *
 *  @param the_thread is the thread to be initialized
 *  @param the_prototype
 *  @param entry_point
 *  @param pointer_argument
 *  @param numeric_argument
 *  @param[in,out] cpu The processor if used to start an idle thread
 *  during system initialization.  Must be set to @c NULL to start a normal
 *  thread.
 */
bool _Thread_Start(
  Thread_Control            *the_thread,
  Thread_Start_types         the_prototype,
  void                      *entry_point,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument,
  Per_CPU_Control           *cpu
);

/**
 * @brief Restarts the thread with the new entry point arguments.
 */
bool _Thread_Restart(
  Thread_Control            *the_thread,
  Thread_Control            *executing,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument
);

/**
 * @brief Yields the processor of the executing thread.
 */
void _Thread_Yield( Thread_Control *executing );

/**
 * @brief Enables or disables thread life protection and returns the
 * previous protection state.
 */
bool _Thread_Set_life_protection( bool protect );

/**
 * @brief Thread action handler for thread life changes (restart and
 * termination requests).
 */
void _Thread_Life_action_handler(
  Thread_Control  *executing,
  Thread_Action   *action,
  Per_CPU_Control *cpu,
  ISR_Level        level
);
199
/**
 * @brief Kills all zombie threads in the system.
 *
 * Threads change into the zombie state as the last step in the thread
 * termination sequence right before a context switch to the heir thread is
 * initiated.  Since the thread stack is still in use during this phase we have
 * to postpone the thread stack reclamation until this point.  On SMP
 * configurations we may have to busy wait for context switch completion here.
 */
void _Thread_Kill_zombies( void );

/**
 * @brief Closes the thread.
 *
 * Closes the thread object and starts the thread termination sequence.  In
 * case the executing thread is not terminated, then this function waits until
 * the terminating thread reached the zombie state.
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );

/**
 *  @brief Removes any set states for @a the_thread.
 *
 *  This routine removes any set states for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  - INTERRUPT LATENCY:
 *    + ready chain
 *    + select heir
 */
void _Thread_Ready(
  Thread_Control *the_thread
);

/**
 *  @brief Clears the indicated STATES for @a the_thread.
 *
 *  This routine clears the indicated STATES for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  - INTERRUPT LATENCY:
 *    + priority map
 *    + select heir
 */
void _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
);
250
/**
 *  @brief Sets the indicated @a state for @a the_thread.
 *
 *  This routine sets the indicated @a state for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  @param[in] the_thread is the thread to set the state for.
 *  @param[in] state is the state to set the_thread to.
 *
 *  - INTERRUPT LATENCY:
 *   + ready chain
 *   + select map
 */
void _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 *  @brief Initializes environment for a thread.
 *
 *  This routine initializes the context of @a the_thread to its
 *  appropriate starting state.
 *
 *  @param[in] the_thread is the pointer to the thread control block.
 */
void _Thread_Load_environment(
  Thread_Control *the_thread
);

/**
 *  @brief Wrapper function for all threads.
 *
 *  This routine is the wrapper function for all threads.  It is
 *  the starting point for all threads.  The user provided thread
 *  entry point is invoked by this routine.  Operations
 *  which must be performed immediately before and after the user's
 *  thread executes are found here.
 *
 *  @note On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 */
void _Thread_Handler( void );

/**
 *  @brief Ended the delay of a thread.
 *
 *  This routine is invoked when a thread must be unblocked at the
 *  end of a time based delay (i.e. wake after or wake when).
 *  It is called by the watchdog handler.
 *
 *  @param[in] id is the thread id
 *  @param[in] ignored is not used (watchdog callout signature)
 */
void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
);

/**
 *  @brief Change the priority of a thread.
 *
 *  This routine changes the current priority of @a the_thread to
 *  @a new_priority.  It performs any necessary scheduling operations
 *  including the selection of a new heir thread.
 *
 *  @param[in] the_thread is the thread to change
 *  @param[in] new_priority is the priority to set @a the_thread to
 *  @param[in] prepend_it is a switch to prepend the thread
 */
void _Thread_Change_priority (
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
);

/**
 *  @brief Set thread priority.
 *
 *  This routine updates the priority related fields in the_thread
 *  control block to indicate the current priority is now new_priority.
 */
void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
);
340
/**
 *  This macro suspends the thread by setting the SUSPENDED state.
 *  Note that no suspend nesting count is maintained here: suspension
 *  maps directly onto _Thread_Set_state().
 */
#define _Thread_Suspend( _the_thread ) \
        _Thread_Set_state( _the_thread, STATES_SUSPENDED )

/**
 *  This macro resumes the thread by clearing the SUSPENDED state.
 *  It is the inverse of _Thread_Suspend() and maps directly onto
 *  _Thread_Clear_state().
 */
#define _Thread_Resume( _the_thread ) \
        _Thread_Clear_state( _the_thread, STATES_SUSPENDED )
355
/**
 *  @brief Maps thread Id to a TCB pointer.
 *
 *  This function maps thread IDs to thread control
 *  blocks.  If ID corresponds to a local thread, then it
 *  returns the_thread control pointer which maps to ID
 *  and @a location is set to OBJECTS_LOCAL.  If the thread ID is
 *  global and resides on a remote node, then location is set
 *  to OBJECTS_REMOTE, and the_thread is undefined.
 *  Otherwise, location is set to OBJECTS_ERROR and
 *  the_thread is undefined.
 *
 *  @param[in] id is the id of the thread.
 *  @param[out] location is set to the location of the block.
 *
 *  @note  The performance of many RTEMS services depends upon
 *         the quick execution of the "good object" path in this
 *         routine.  If there is a possibility of saving a few
 *         cycles off the execution time, this routine is worth
 *         further optimization attention.
 */
Thread_Control *_Thread_Get (
  Objects_Id         id,
  Objects_Locations *location
);

/**
 *  @brief Cancel a blocking operation due to ISR.
 *
 *  This method is used to cancel a blocking operation that was
 *  satisfied from an ISR while the thread executing was in the
 *  process of blocking.
 *
 *  This method will restore the previous ISR disable level during the cancel
 *  operation.  Thus it is an implicit _ISR_Enable().
 *
 *  @param[in] sync_state is the synchronization state
 *  @param[in] the_thread is the thread whose blocking is canceled
 *  @param[in] level is the previous ISR disable level
 *
 *  @note This is a rare routine in RTEMS.  It is called with
 *        interrupts disabled and only when an ISR completed
 *        a blocking condition in process.
 */
void _Thread_blocking_operation_Cancel(
  Thread_blocking_operation_States  sync_state,
  Thread_Control                   *the_thread,
  ISR_Level                         level
);
405
/**
 * @brief Returns the processor assigned to the thread.
 *
 * On SMP configurations this is the processor recorded in the scheduler
 * fields of the TCB; on uni-processor configurations it is simply the
 * one and only processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}
418
/**
 * @brief Assigns the processor to the thread.
 *
 * On uni-processor configurations this is a no-op since there is only one
 * processor.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
431
432/**
433 * This function returns true if the_thread is the currently executing
434 * thread, and false otherwise.
435 */
436
437RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
438  const Thread_Control *the_thread
439)
440{
441  return ( the_thread == _Thread_Executing );
442}
443
#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif
459
460/**
461 * This function returns true if the_thread is the heir
462 * thread, and false otherwise.
463 */
464
465RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
466  const Thread_Control *the_thread
467)
468{
469  return ( the_thread == _Thread_Heir );
470}
471
/**
 * This routine clears any blocking state for the_thread.  It performs
 * any necessary scheduling operations including the selection of
 * a new heir thread.
 */

RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  /* Clearing all STATES_BLOCKED bits makes the thread ready again. */
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}
484
/**
 * This routine resets the current context of the calling thread
 * to that of its initial state.  It does not return: control is
 * transferred via _CPU_Context_Restart_self().
 */

RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  /* Drop the Giant lock and leave interrupts disabled for the restart. */
  _Giant_Release();

  _ISR_Disable_without_giant( level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  /* Reload the floating point context before the register restart. */
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif

  _CPU_Context_Restart_self( &executing->Registers );
}
508
/**
 * This function returns true if the floating point context of
 * the_thread is currently loaded in the floating point unit, and
 * false otherwise.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif
523
/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif
535
/**
 * This function returns true if a thread dispatch is necessary on the
 * current processor, and false otherwise.
 *
 * NOTE(review): the previous comment claimed this checks whether
 * dispatching is disabled, which does not match the code below.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}
545
546/**
547 * This function returns true if the_thread is NULL and false otherwise.
548 */
549
550RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
551  const Thread_Control *the_thread
552)
553{
554  return ( the_thread == NULL );
555}
556
/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the status code indicates that a
 * proxy is blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}
568
/**
 * @brief Returns the maximum count of internal threads: one idle thread
 * per configured processor, plus the MPCI receive thread when the system
 * is configured as multiprocessing.
 */
RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
{
  /* Idle threads */
  uint32_t maximum_internal_threads =
    rtems_configuration_get_maximum_processors();

  /* MPCI thread */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    ++maximum_internal_threads;
  }
#endif

  return maximum_internal_threads;
}
584
/**
 * @brief Allocates an internal thread object.
 *
 * NOTE(review): uses the unprotected allocator, so the caller presumably
 * must provide the appropriate protection — confirm against callers.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information );
}
590
/**
 * @brief Requests a thread dispatch on the processor executing the thread.
 *
 * On SMP configurations, if the thread currently executes on some processor,
 * the dispatch necessary indicator of that processor is set and an
 * inter-processor interrupt is sent if it is not the current processor.
 * On uni-processor configurations this is a no-op.
 */
RTEMS_INLINE_ROUTINE void _Thread_Request_dispatch_if_executing(
  Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  if ( _Thread_Is_executing_on_a_processor( thread ) ) {
    const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
    Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );

    cpu_of_thread->dispatch_necessary = true;

    if ( cpu_of_executing != cpu_of_thread ) {
      _Per_CPU_Send_interrupt( cpu_of_thread );
    }
  }
#else
  (void) thread;
#endif
}
610
/**
 * @brief Notifies the thread that a signal-like event occurred.
 *
 * If called from an ISR on the processor executing the thread, the local
 * dispatch necessary indicator is set.  Otherwise, on SMP configurations,
 * the processor executing the thread (if any, and if different from the
 * current one) is marked for dispatch and interrupted.
 */
RTEMS_INLINE_ROUTINE void _Thread_Signal_notification( Thread_Control *thread )
{
  if ( _ISR_Is_in_progress() && _Thread_Is_executing( thread ) ) {
    _Thread_Dispatch_necessary = true;
  } else {
#if defined(RTEMS_SMP)
    if ( _Thread_Is_executing_on_a_processor( thread ) ) {
      const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
      Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );

      if ( cpu_of_executing != cpu_of_thread ) {
        cpu_of_thread->dispatch_necessary = true;
        _Per_CPU_Send_interrupt( cpu_of_thread );
      }
    }
#endif
  }
}
629
/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * The thread dispatch necessary indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Scheduler_SMP_Update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  cpu_self->dispatch_necessary = false;

#if defined( RTEMS_SMP )
  /*
   * It is critical that we first update the dispatch necessary and then
   * read the heir so that we don't miss an update by
   * _Scheduler_SMP_Update_heir().
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
#endif

  heir = cpu_self->heir;
  cpu_self->executing = heir;

  return heir;
}
662
/**
 * @brief Adds the time since the last context switch to the CPU usage of
 * the executing thread.
 *
 * @param[in,out] executing The thread being charged for the elapsed time.
 * @param[in,out] time_of_last_context_switch Updated to the current uptime.
 */
RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
  Thread_Control *executing,
  Timestamp_Control *time_of_last_context_switch
)
{
  Timestamp_Control uptime;
  Timestamp_Control ran;

  /* ran = uptime - time_of_last_context_switch */
  _TOD_Get_uptime( &uptime );
  _Timestamp_Subtract(
    time_of_last_context_switch,
    &uptime,
    &ran
  );
  *time_of_last_context_switch = uptime;
  _Timestamp_Add_to( &executing->cpu_time_used, &ran );
}
680
/**
 * @brief Initializes the thread action control to an empty action chain.
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}
687
688RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
689  Thread_Action         *action,
690  Thread_Action_handler  handler
691)
692{
693  action->handler = handler;
694  _Chain_Set_off_chain( &action->Node );
695}
696
/**
 * @brief Disables interrupts and acquires the Per-CPU lock of the current
 * processor.
 *
 * @param[out] level Receives the previous interrupt level.
 *
 * @return The current processor.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *
  _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level )
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Per_CPU_Get();
  _Per_CPU_Acquire( cpu );

  return cpu;
}
708
/**
 * @brief Disables interrupts and acquires the Per-CPU lock of the processor
 * assigned to the thread.
 *
 * @param[in] thread The thread whose processor lock is acquired.
 * @param[out] level Receives the previous interrupt level.
 *
 * @return The processor assigned to the thread.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire(
  Thread_Control *thread,
  ISR_Level      *level
)
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Thread_Get_CPU( thread );
  _Per_CPU_Acquire( cpu );

  return cpu;
}
722
/**
 * @brief Releases the Per-CPU lock and restores the interrupt level.
 *
 * Inverse of _Thread_Action_ISR_disable_and_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
  Per_CPU_Control *cpu,
  ISR_Level level
)
{
  _Per_CPU_Release_and_ISR_enable( cpu, level );
}
730
/**
 * @brief Appends the action to the post-switch actions of the thread if the
 * action is not already on the chain.
 *
 * The operation is performed with interrupts disabled and the Per-CPU lock
 * of the thread's processor held.
 */
RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control *thread,
  Thread_Action  *action
)
{
  Per_CPU_Control *cpu;
  ISR_Level        level;

  cpu = _Thread_Action_ISR_disable_and_acquire( thread, &level );
  /* Appending only when off chain makes repeated adds idempotent. */
  _Chain_Append_if_is_off_chain_unprotected(
    &thread->Post_switch_actions.Chain,
    &action->Node
  );
  _Thread_Action_release_and_ISR_enable( cpu, level );
}
746
747RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
748  Thread_Life_state life_state
749)
750{
751  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
752}
753
754RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
755  Thread_Life_state life_state
756)
757{
758  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
759}
760
761RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected(
762  Thread_Life_state life_state
763)
764{
765  return ( life_state & THREAD_LIFE_PROTECTED ) != 0;
766}
767
768RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
769  Thread_Life_state life_state
770)
771{
772  return ( life_state & THREAD_LIFE_RESTARTING_TERMINATING ) != 0;
773}
774
775/**
776 * @brief Returns true if the thread owns resources, and false otherwise.
777 *
778 * Resources are accounted with the Thread_Control::resource_count resource
779 * counter.  This counter is used by semaphore objects for example.
780 *
781 * In addition to the resource counter there is a resource dependency tree
782 * available on SMP configurations.  In case this tree is non-empty, then the
783 * thread owns resources.
784 *
785 * @param[in] the_thread The thread.
786 */
787RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
788  const Thread_Control *the_thread
789)
790{
791  bool owns_resources = the_thread->resource_count != 0;
792
793#if defined(RTEMS_SMP)
794  owns_resources = owns_resources
795    || _Resource_Node_owns_resources( &the_thread->Resource_node );
796#endif
797
798  return owns_resources;
799}
800
/**
 * @brief Records the real processor of the thread for debugging purposes.
 *
 * This is a no-op unless both RTEMS_SMP and RTEMS_DEBUG are defined.
 */
RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
  the_thread->Scheduler.debug_real_cpu = cpu;
#else
  (void) the_thread;
  (void) cpu;
#endif
}
813
#if !defined(__DYNAMIC_REENT__)
/**
 * This routine returns the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE struct _reent **_Thread_Get_libc_reent( void )
{
  return _Thread_libc_reent;
}

/**
 * This routine sets the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE void _Thread_Set_libc_reent (
  struct _reent **libc_reent
)
{
  _Thread_libc_reent = libc_reent;
}
#endif
835
836/** @}*/
837
838#ifdef __cplusplus
839}
840#endif
841
842#if defined(RTEMS_MULTIPROCESSING)
843#include <rtems/score/threadmp.h>
844#endif
845
846#endif
847/* end of include file */
Note: See TracBrowser for help on using the repository browser.