source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ f5d4570f

Last change on this file since f5d4570f was f5d4570f, checked in by Sebastian Huber <sebastian.huber@…>, on 07/11/15 at 19:14:46

score: Simplify _Thread_Lock_set()

Exploit the fact that the current thread lock must be the default thread
lock and interrupts are disabled if we call _Thread_Lock_set().

/**
 * @file
 *
 * @brief Inlined Routines from the Thread Handler
 *
 * This file contains the macro implementation of the inlined
 * routines from the Thread handler.
 */

/*
 *  COPYRIGHT (c) 1989-2008.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2014 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_THREADIMPL_H
#define _RTEMS_SCORE_THREADIMPL_H

#include <rtems/score/thread.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/interr.h>
#include <rtems/score/isr.h>
#include <rtems/score/objectimpl.h>
#include <rtems/score/resourceimpl.h>
#include <rtems/score/statesimpl.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/todimpl.h>
#include <rtems/config.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreThread
 */
/**@{**/

/**
 *  The following constant defines the status which indicates that a proxy
 *  is blocking while it waits for a resource.
 */
#define THREAD_STATUS_PROXY_BLOCKING 0x1111111

/**
 *  Self for the GNU Ada Run-Time
 */
SCORE_EXTERN void *rtems_ada_self;

/**
 *  The following defines the information control block used to
 *  manage this class of objects.
 */
SCORE_EXTERN Objects_Information _Thread_Internal_information;

/**
 *  The following points to the thread whose floating point
 *  context is currently loaded.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
SCORE_EXTERN Thread_Control *_Thread_Allocated_fp;
#endif

#if !defined(__DYNAMIC_REENT__)
/**
 * The C library re-entrant global pointer. Some C library implementations,
 * such as newlib, have a single global pointer that is changed during a
 * context switch. This pointer points to that global pointer. The thread
 * control block holds a pointer to the task specific data.
 */
SCORE_EXTERN struct _reent **_Thread_libc_reent;
#endif

#define THREAD_CHAIN_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.Chain )

#define THREAD_RBTREE_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.RBTree )

#if defined(RTEMS_SMP)
#define THREAD_RESOURCE_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node )
#endif

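/*
 * Illustrative sketch (not part of the original header): the macros above
 * recover the enclosing Thread_Control from an embedded node, for example
 * while walking a chain of waiting threads.  The function name is
 * hypothetical.
 *
 * @code
 * void example_visit_waiting_threads( Chain_Control *chain )
 * {
 *   Chain_Node *node;
 *
 *   for (
 *     node = _Chain_First( chain );
 *     !_Chain_Is_tail( chain, node );
 *     node = _Chain_Next( node )
 *   ) {
 *     Thread_Control *the_thread = THREAD_CHAIN_NODE_TO_THREAD( node );
 *
 *     (void) the_thread; // inspect the waiting thread here
 *   }
 * }
 * @endcode
 */
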
/**
 *  @brief Initialize thread handler.
 *
 *  This routine performs the initialization necessary for this handler.
 */
void _Thread_Handler_initialization(void);

/**
 *  @brief Create idle thread.
 *
 *  This routine creates the idle thread.
 *
 *  @warning No thread should be created before this one.
 */
void _Thread_Create_idle(void);

/**
 *  @brief Start thread multitasking.
 *
 *  This routine initiates multitasking.  It is invoked only as
 *  part of initialization and its invocation is the last act of
 *  the non-multitasking part of the system initialization.
 */
void _Thread_Start_multitasking( void ) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/**
 *  @brief Allocate the requested stack space for the thread.
 *
 *  Allocate the requested stack space for the thread.
 *  Set the Start.stack field to the address of the stack.
 *
 *  @param[in] the_thread is the thread for which the stack space is requested
 *  @param[in] stack_size is the size of the requested stack space
 *
 *  @retval size The actual size allocated after any adjustment.
 *  @retval zero The allocation failed.
 */
size_t _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  size_t          stack_size
);

/**
 *  @brief Deallocate thread stack.
 *
 *  Deallocate the Thread's stack.
 */
void _Thread_Stack_Free(
  Thread_Control *the_thread
);

/**
 *  @brief Initialize thread.
 *
 *  This routine initializes the specified thread.  It allocates
 *  all memory associated with this thread.  It completes by adding
 *  the thread to the local object table so operations on this
 *  thread id are allowed.
 *
 *  @note If stack_area is NULL, it is allocated from the workspace.
 *
 *  @note If the stack is allocated from the workspace, then it is
 *        guaranteed to be of at least minimum size.
 */
bool _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  const struct Scheduler_Control       *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
);

/**
 *  @brief Initializes thread and executes it.
 *
 *  This routine initializes the executable information for a thread
 *  and makes it ready to execute.  After this routine executes, the
 *  thread competes with all other threads for CPU time.
 *
 *  @param the_thread is the thread to be initialized
 *  @param the_prototype is the thread start prototype
 *  @param entry_point is the thread entry point
 *  @param pointer_argument is the pointer argument for the entry point
 *  @param numeric_argument is the numeric argument for the entry point
 *  @param[in,out] cpu The processor used to start an idle thread during
 *  system initialization.  Must be set to @c NULL to start a normal thread.
 */
bool _Thread_Start(
  Thread_Control            *the_thread,
  Thread_Start_types         the_prototype,
  void                      *entry_point,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument,
  Per_CPU_Control           *cpu
);

bool _Thread_Restart(
  Thread_Control            *the_thread,
  Thread_Control            *executing,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument
);

void _Thread_Yield( Thread_Control *executing );

bool _Thread_Set_life_protection( bool protect );

void _Thread_Life_action_handler(
  Thread_Control  *executing,
  Thread_Action   *action,
  Per_CPU_Control *cpu,
  ISR_Level        level
);

/**
 * @brief Kills all zombie threads in the system.
 *
 * Threads change into the zombie state as the last step in the thread
 * termination sequence right before a context switch to the heir thread is
 * initiated.  Since the thread stack is still in use during this phase we have
 * to postpone the thread stack reclamation until this point.  On SMP
 * configurations we may have to busy wait for context switch completion here.
 */
void _Thread_Kill_zombies( void );

/**
 * @brief Closes the thread.
 *
 * Closes the thread object and starts the thread termination sequence.  In
 * case the executing thread is not terminated, then this function waits until
 * the terminating thread reaches the zombie state.
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );

/**
 * @brief Clears the specified thread state.
 *
 * In case the previous state is a non-ready state and the next state is the
 * ready state, then the thread is unblocked by the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] state The state to clear.  It must not be zero.
 *
 * @return The previous state.
 */
States_Control _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 * @brief Sets the specified thread state.
 *
 * In case the previous state is the ready state, then the thread is blocked by
 * the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] state The state to set.  It must not be zero.
 *
 * @return The previous state.
 */
States_Control _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 * @brief Clears all thread states.
 *
 * In case the previous state is a non-ready state, then the thread is
 * unblocked by the scheduler.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Ready(
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_ALL_SET );
}

/**
 *  @brief Initializes the environment for a thread.
 *
 *  This routine initializes the context of @a the_thread to its
 *  appropriate starting state.
 *
 *  @param[in] the_thread is the pointer to the thread control block.
 */
void _Thread_Load_environment(
  Thread_Control *the_thread
);

/**
 *  @brief Wrapper function for all threads.
 *
 *  This routine is the wrapper function for all threads.  It is
 *  the starting point for all threads.  The user provided thread
 *  entry point is invoked by this routine.  Operations
 *  which must be performed immediately before and after the user's
 *  thread executes are found here.
 *
 *  @note On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 */
void _Thread_Handler( void );

/**
 * @brief Executes the global constructors and then restarts itself as the
 * first initialization thread.
 *
 * The first initialization thread is the first RTEMS initialization task or
 * the first POSIX initialization thread in case no RTEMS initialization tasks
 * are present.
 */
void *_Thread_Global_construction( void );

/**
 *  @brief Ends the delay of a thread.
 *
 *  This routine is invoked when a thread must be unblocked at the
 *  end of a time based delay (i.e. wake after or wake when).
 *  It is called by the watchdog handler.
 *
 *  @param[in] id is the thread id
 *  @param[in] ignored is not used
 */
void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
);

/**
 * @brief Returns true if the left thread priority is less than the right
 * thread priority in the intuitive sense of priority and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
  Priority_Control left,
  Priority_Control right
)
{
  return left > right;
}

/**
 * @brief Returns the highest priority of the left and right thread priorities
 * in the intuitive sense of priority.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
  Priority_Control left,
  Priority_Control right
)
{
  return _Thread_Priority_less_than( left, right ) ? right : left;
}

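/*
 * Illustrative sketch (not part of the original header): priority values in
 * the score are inverted with respect to the intuitive sense, a numerically
 * smaller value means a higher priority, hence the inverted comparison above.
 *
 * @code
 * Priority_Control high = 1;
 * Priority_Control low = 2;
 *
 * _Assert( _Thread_Priority_less_than( low, high ) );
 * _Assert( _Thread_Priority_highest( high, low ) == high );
 * @endcode
 */
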
/**
 * @brief Filters a thread priority change.
 *
 * Called by _Thread_Change_priority() under the protection of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in, out] new_priority The new priority of the thread.  The filter may
 * alter this value.
 * @param[in] arg The argument passed to _Thread_Change_priority().
 *
 * @retval true Change the current priority.
 * @retval false Otherwise.
 */
typedef bool ( *Thread_Change_priority_filter )(
  Thread_Control   *the_thread,
  Priority_Control *new_priority,
  void             *arg
);

/**
 * @brief Changes the priority of a thread if allowed by the filter function.
 *
 * It changes the current priority of the thread to the new priority in case
 * the filter function returns true.  In this case the scheduler is notified
 * of the priority change as well.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 * @param[in] arg The argument for the filter function.
 * @param[in] filter The filter function to determine if a priority change is
 * allowed and optionally perform other actions under the protection of the
 * thread lock simultaneously with the update of the current priority.
 * @param[in] prepend_it In case this is true, then the thread is prepended to
 * its priority group in its scheduler instance, otherwise it is appended.
 */
void _Thread_Change_priority(
  Thread_Control                *the_thread,
  Priority_Control               new_priority,
  void                          *arg,
  Thread_Change_priority_filter  filter,
  bool                           prepend_it
);

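/*
 * Illustrative sketch (not part of the original header): a filter which
 * permits only changes to a higher priority, in the spirit of
 * _Thread_Raise_priority().  The function names are hypothetical.
 *
 * @code
 * static bool example_raise_priority_filter(
 *   Thread_Control   *the_thread,
 *   Priority_Control *new_priority,
 *   void             *arg
 * )
 * {
 *   (void) arg;
 *
 *   // change only if the new priority is higher than the current one
 *   return _Thread_Priority_less_than(
 *     the_thread->current_priority,
 *     *new_priority
 *   );
 * }
 *
 * void example_raise( Thread_Control *the_thread, Priority_Control prio )
 * {
 *   _Thread_Change_priority(
 *     the_thread,
 *     prio,
 *     NULL,
 *     example_raise_priority_filter,
 *     false
 *   );
 * }
 * @endcode
 */
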
/**
 * @brief Raises the priority of a thread.
 *
 * It changes the current priority of the thread to the new priority if the new
 * priority is higher than the current priority.  In this case the thread is
 * appended to its new priority group in its scheduler instance.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 *
 * @see _Thread_Change_priority().
 */
void _Thread_Raise_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
);

/**
 * @brief Sets the current priority of a thread to its real priority.
 *
 * Sets the priority restore hint to false.
 */
void _Thread_Restore_priority( Thread_Control *the_thread );

/**
 * @brief Sets the priority of a thread.
 *
 * It sets the real priority of the thread.  In addition it changes the current
 * priority of the thread if the new priority is higher than the current
 * priority or the thread owns no resources.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 * @param[out] old_priority The old real priority of the thread.  This pointer
 * must not be @c NULL.
 * @param[in] prepend_it In case this is true, then the thread is prepended to
 * its priority group in its scheduler instance, otherwise it is appended.
 *
 * @see _Thread_Change_priority().
 */
void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  Priority_Control *old_priority,
  bool              prepend_it
);

/**
 *  @brief Maps thread Id to a TCB pointer.
 *
 *  This function maps thread IDs to thread control
 *  blocks.  If ID corresponds to a local thread, then it
 *  returns the thread control pointer which maps to ID
 *  and @a location is set to OBJECTS_LOCAL.  If the thread ID is
 *  global and resides on a remote node, then location is set
 *  to OBJECTS_REMOTE, and the_thread is undefined.
 *  Otherwise, location is set to OBJECTS_ERROR and
 *  the_thread is undefined.
 *
 *  @param[in] id is the id of the thread.
 *  @param[out] location is set to the location of the thread.
 *
 *  @note  The performance of many RTEMS services depends upon
 *         the quick execution of the "good object" path in this
 *         routine.  If there is a possibility of saving a few
 *         cycles off the execution time, this routine is worth
 *         further optimization attention.
 */
Thread_Control *_Thread_Get (
  Objects_Id         id,
  Objects_Locations *location
);

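/*
 * Illustrative sketch (not part of the original header) of the usual lookup
 * pattern: on OBJECTS_LOCAL the caller continues with thread dispatching
 * disabled and must release the object, e.g. via _Objects_Put(), when done.
 *
 * @code
 * Thread_Control    *the_thread;
 * Objects_Locations  location;
 *
 * the_thread = _Thread_Get( id, &location );
 * switch ( location ) {
 *   case OBJECTS_LOCAL:
 *     // operate on the_thread ...
 *     _Objects_Put( &the_thread->Object );
 *     break;
 *   default:
 *     // invalid or remote identifier
 *     break;
 * }
 * @endcode
 */
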
/**
 * @brief Gets a thread by its identifier.
 *
 * @see _Objects_Get_isr_disable().
 */
Thread_Control *_Thread_Get_interrupt_disable(
  Objects_Id         id,
  Objects_Locations *location,
  ISR_lock_Context  *lock_context
);

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}

/**
 * This function returns true if the_thread is the currently executing
 * thread, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Executing );
}

#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif

/**
 * @brief Returns @a true and sets time_of_context_switch to the
 * time of the last context switch when the thread is currently executing
 * in the system, otherwise @a false.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Get_time_of_last_context_switch(
  Thread_Control    *the_thread,
  Timestamp_Control *time_of_context_switch
)
{
  bool retval = false;

  _Thread_Disable_dispatch();
  #ifndef RTEMS_SMP
    if ( _Thread_Executing->Object.id == the_thread->Object.id ) {
      *time_of_context_switch = _Thread_Time_of_last_context_switch;
      retval = true;
    }
  #else
    if ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
      *time_of_context_switch =
        _Thread_Get_CPU( the_thread )->time_of_last_context_switch;
      retval = true;
    }
  #endif
  _Thread_Enable_dispatch();
  return retval;
}

/**
 * This function returns true if the_thread is the heir
 * thread, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Heir );
}

/**
 * This routine clears any blocking state for the_thread.  It performs
 * any necessary scheduling operations including the selection of
 * a new heir thread.
 */

RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}

/**
 * This routine resets the current context of the calling thread
 * to that of its initial state.
 */

RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  _Giant_Release( _Per_CPU_Get() );

  _ISR_Disable_without_giant( level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif

  _CPU_Context_Restart_self( &executing->Registers );
}

/**
 * This function returns true if the floating point context of
 * the_thread is currently loaded in the floating point unit, and
 * false otherwise.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif

/*
 *  If the CPU has hardware floating point, then we must address saving
 *  and restoring it as part of the context switch.
 *
 *  The second conditional compilation section selects the algorithm used
 *  to context switch between floating point tasks.  The deferred algorithm
 *  can be significantly better in a system with few floating point tasks
 *  because it reduces the total number of save and restore FP context
 *  operations.  However, this algorithm can not be used on all CPUs due
 *  to unpredictable use of FP registers by some compilers for integer
 *  operations.
 */

RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}

RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}

/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif

/**
 * This function returns true if a thread dispatch is necessary, and false
 * otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}

/**
 * This function returns true if the_thread is NULL and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
  const Thread_Control *the_thread
)
{
  return ( the_thread == NULL );
}

/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the status code indicates that a proxy is
 * blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}

RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
{
  /* Idle threads */
  uint32_t maximum_internal_threads =
    rtems_configuration_get_maximum_processors();

  /* MPCI thread */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    ++maximum_internal_threads;
  }
#endif

  return maximum_internal_threads;
}

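/*
 * Illustrative note (not part of the original header): there is one idle
 * thread per configured processor, so a system configured for four
 * processors yields four internal threads, plus one MPCI receive thread if
 * multiprocessing is active, for a total of five.
 */
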
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information );
}

/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * The thread dispatch necessary indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Thread_Dispatch_update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  cpu_self->dispatch_necessary = false;

#if defined( RTEMS_SMP )
  /*
   * It is critical that we first update the dispatch necessary and then
   * read the heir so that we don't miss an update by
   * _Thread_Dispatch_update_heir().
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
#endif

  heir = cpu_self->heir;
  cpu_self->executing = heir;

  return heir;
}

#if defined( RTEMS_SMP )
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and then the dispatch
   * necessary so that _Thread_Get_heir_and_make_it_executing() cannot miss an
   * update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}
#endif

RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
  Thread_Control *executing,
  Timestamp_Control *time_of_last_context_switch
)
{
  Timestamp_Control uptime;
  Timestamp_Control ran;

  _TOD_Get_uptime( &uptime );
  _Timestamp_Subtract(
    time_of_last_context_switch,
    &uptime,
    &ran
  );
  *time_of_last_context_switch = uptime;
  _Timestamp_Add_to( &executing->cpu_time_used, &ran );
}

RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}

RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  action->handler = handler;
  _Chain_Set_off_chain( &action->Node );
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *
  _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level )
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Per_CPU_Get();
  _Per_CPU_Acquire( cpu );

  return cpu;
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire(
  Thread_Control *thread,
  ISR_Level      *level
)
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Thread_Get_CPU( thread );
  _Per_CPU_Acquire( cpu );

  return cpu;
}

RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
  Per_CPU_Control *cpu,
  ISR_Level level
)
{
  _Per_CPU_Release_and_ISR_enable( cpu, level );
}

RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control *thread,
  Thread_Action  *action
)
{
  Per_CPU_Control *cpu_of_thread;
  ISR_Level        level;

  cpu_of_thread = _Thread_Action_ISR_disable_and_acquire( thread, &level );
  cpu_of_thread->dispatch_necessary = true;

#if defined(RTEMS_SMP)
  if ( _Per_CPU_Get() != cpu_of_thread ) {
    _Per_CPU_Send_interrupt( cpu_of_thread );
  }
#endif

  _Chain_Append_if_is_off_chain_unprotected(
    &thread->Post_switch_actions.Chain,
    &action->Node
  );

  _Thread_Action_release_and_ISR_enable( cpu_of_thread, level );
}

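/*
 * Illustrative sketch (not part of the original header): a post-switch
 * action is initialized once with its handler and may later be added to a
 * thread.  The handler appears to run with the per-CPU lock acquired and
 * interrupts disabled and releases them itself, as
 * _Thread_Life_action_handler() does.  All names below are hypothetical.
 *
 * @code
 * static void example_action_handler(
 *   Thread_Control  *executing,
 *   Thread_Action   *action,
 *   Per_CPU_Control *cpu,
 *   ISR_Level        level
 * )
 * {
 *   _Thread_Action_release_and_ISR_enable( cpu, level );
 *
 *   // do the deferred work for the executing thread
 * }
 *
 * static Thread_Action example_action;
 *
 * void example_defer_work( Thread_Control *thread )
 * {
 *   _Thread_Action_initialize( &example_action, example_action_handler );
 *   _Thread_Add_post_switch_action( thread, &example_action );
 * }
 * @endcode
 */
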
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_PROTECTED ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING_TERMINATING ) != 0;
}

/**
 * @brief Returns true if the thread owns resources, and false otherwise.
 *
 * Resources are accounted with the Thread_Control::resource_count resource
 * counter.  This counter is used by semaphore objects for example.
 *
 * In addition to the resource counter there is a resource dependency tree
 * available on SMP configurations.  In case this tree is non-empty, then the
 * thread owns resources.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
  const Thread_Control *the_thread
)
{
  bool owns_resources = the_thread->resource_count != 0;

#if defined(RTEMS_SMP)
  owns_resources = owns_resources
    || _Resource_Node_owns_resources( &the_thread->Resource_node );
#endif

  return owns_resources;
}

/**
 * @brief Acquires the default thread lock and returns the executing thread.
 *
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @return The executing thread.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Lock_acquire_default_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _ISR_lock_Acquire( &executing->Lock.Default, lock_context );

  return executing;
}

/**
 * @brief Acquires the default thread lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Assert( _ISR_Get_level() != 0 );
  _ISR_lock_Acquire( &the_thread->Lock.Default, lock_context );
}

/**
 * @brief Acquires the default thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable_and_acquire( &the_thread->Lock.Default, lock_context );
}

/**
 * @brief Releases the thread lock inside a critical section (interrupts
 * disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] lock The lock.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_critical(
  ISR_lock_Control *lock,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release( lock, lock_context );
}

/**
 * @brief Releases the thread lock.
 *
 * @param[in] lock The lock returned by _Thread_Lock_acquire().
 * @param[in] lock_context The lock context used for _Thread_Lock_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release(
  ISR_lock_Control *lock,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_critical( lock, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}

/**
 * @brief Releases the default thread lock inside a critical section
 * (interrupts disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_critical(
#if defined(RTEMS_SMP)
    &the_thread->Lock.Default,
#else
    NULL,
#endif
    lock_context
  );
}

/**
 * @brief Releases the default thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_default_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}

/**
 * @brief Acquires the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for _Thread_Lock_release().
 *
 * @return The lock required by _Thread_Lock_release().
 */
RTEMS_INLINE_ROUTINE ISR_lock_Control *_Thread_Lock_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Control *lock;

  while ( true ) {
    uint32_t my_generation;

    _ISR_lock_ISR_disable( lock_context );
    my_generation = the_thread->Lock.generation;

    /*
     * Ensure that we read the initial lock generation before we obtain our
     * current lock.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQUIRE );

    lock = the_thread->Lock.current;
    _ISR_lock_Acquire( lock, lock_context );

    /*
     * Ensure that we read the second lock generation after we obtained our
     * current lock.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQUIRE );

    if ( the_thread->Lock.generation == my_generation ) {
      break;
    }

    _Thread_Lock_release( lock, lock_context );
  }

  return lock;
#else
  _ISR_Disable( lock_context->isr_level );

  return NULL;
#endif
}

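/*
 * Illustrative sketch (not part of the original header): the lock returned
 * by _Thread_Lock_acquire() must be handed back to _Thread_Lock_release()
 * together with the same lock context.
 *
 * @code
 * ISR_lock_Context  lock_context;
 * ISR_lock_Control *lock;
 *
 * lock = _Thread_Lock_acquire( the_thread, &lock_context );
 *
 * // inspect or modify the thread wait state here
 *
 * _Thread_Lock_release( lock, &lock_context );
 * @endcode
 */
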
#if defined(RTEMS_SMP)
/*
 * Internal function, use _Thread_Lock_set() or _Thread_Lock_restore_default()
 * instead.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected(
  Thread_Control   *the_thread,
  ISR_lock_Control *new_lock
)
{
  the_thread->Lock.current = new_lock;

  /*
   * Ensure that the new lock is visible before we update the generation
   * number.  Otherwise someone would be able to read an up to date generation
   * number and an old lock.
   */
  _Atomic_Fence( ATOMIC_ORDER_RELEASE );

  /*
   * Since we set a new lock right before, this increment is not protected by a
   * lock and thus must be an atomic operation.
   */
  _Atomic_Fetch_add_uint(
    &the_thread->Lock.generation,
    1,
    ATOMIC_ORDER_RELAXED
  );
}
#endif

/**
 * @brief Sets a new thread lock.
 *
 * The caller must not be the owner of the default thread lock.  The caller
 * must be the owner of the new lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_lock The new thread lock.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_set(
  Thread_Control   *the_thread,
  ISR_lock_Control *new_lock
)
{
  ISR_lock_Context lock_context;

  _Thread_Lock_acquire_default_critical( the_thread, &lock_context );
  _Assert( the_thread->Lock.current == &the_thread->Lock.Default );
  _Thread_Lock_set_unprotected( the_thread, new_lock );
  _Thread_Lock_release_default_critical( the_thread, &lock_context );
}
#else
#define _Thread_Lock_set( the_thread, new_lock ) \
  do { } while ( 0 )
#endif

/**
 * @brief Restores the default thread lock.
 *
 * The caller must be the owner of the current thread lock.
 *
 * @param[in] the_thread The thread.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_restore_default(
  Thread_Control *the_thread
)
{
  _Atomic_Fence( ATOMIC_ORDER_RELEASE );

  _Thread_Lock_set_unprotected( the_thread, &the_thread->Lock.Default );
}
#else
#define _Thread_Lock_restore_default( the_thread ) \
  do { } while ( 0 )
#endif

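/*
 * Illustrative sketch (not part of the original header): a blocking
 * operation may redirect the thread lock to a lock embedded in its thread
 * queue while the thread waits and restore the default lock once the wait
 * ends.  The queue lock name is hypothetical.
 *
 * @code
 * _Thread_Lock_set( the_thread, &the_queue_lock );
 *
 * // ... the thread blocks on the queue ...
 *
 * _Thread_Lock_restore_default( the_thread );
 * @endcode
 */
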
/**
 * @brief The initial thread wait flags value set by _Thread_Initialize().
 */
#define THREAD_WAIT_FLAGS_INITIAL 0x0U

/**
 * @brief Mask to get the thread wait state flags.
 */
#define THREAD_WAIT_STATE_MASK 0xffU

/**
 * @brief Indicates that the thread begins with the blocking operation.
 *
 * A blocking operation consists of an optional watchdog initialization and the
 * setting of the appropriate thread blocking state with the corresponding
 * scheduler block operation.
 */
#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U

/**
 * @brief Indicates that the thread completed the blocking operation.
 */
#define THREAD_WAIT_STATE_BLOCKED 0x2U

/**
 * @brief Indicates that a condition to end the thread wait occurred.
 *
 * This could be a timeout, a signal, an event or a resource availability.
 */
#define THREAD_WAIT_STATE_READY_AGAIN 0x4U

/**
 * @brief Mask to get the thread wait class flags.
 */
#define THREAD_WAIT_CLASS_MASK 0xff00U

/**
 * @brief Indicates that the thread waits for an event.
 */
#define THREAD_WAIT_CLASS_EVENT 0x100U

/**
 * @brief Indicates that the thread waits for a system event.
 */
#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U

1263 * @brief Indicates that the thread waits for a object.
1264 */
1265#define THREAD_WAIT_CLASS_OBJECT 0x400U
1266

RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}

RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}

/**
 * @brief Tries to change the thread wait flags inside a critical section
 * (interrupts disabled).
 *
 * In case the wait flags are equal to the expected wait flags, then the wait
 * flags are set to the desired wait flags.
 *
 * @param[in] the_thread The thread.
 * @param[in] expected_flags The expected wait flags.
 * @param[in] desired_flags The desired wait flags.
 *
 * @retval true The wait flags were equal to the expected wait flags.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_critical(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_RELAXED,
    ATOMIC_ORDER_RELAXED
  );
#else
  bool success = the_thread->Wait.flags == expected_flags;

  if ( success ) {
    the_thread->Wait.flags = desired_flags;
  }

  return success;
#endif
}

/**
 * @brief Tries to change the thread wait flags.
 *
 * @see _Thread_Wait_flags_try_change_critical().
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  bool success;
#if !defined(RTEMS_SMP)
  ISR_Level level;

  _ISR_Disable_without_giant( level );
#endif

  success = _Thread_Wait_flags_try_change_critical(
    the_thread,
    expected_flags,
    desired_flags
  );

#if !defined(RTEMS_SMP)
  _ISR_Enable_without_giant( level );
#endif

  return success;
}

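/*
 * Illustrative sketch (not part of the original header) of the canonical
 * blocking pattern, modelled on the event seize implementation: the thread
 * first publishes its intent to block and later tries to move from
 * INTEND_TO_BLOCK to BLOCKED.  If a satisfying operation raced in and set
 * READY_AGAIN, the change fails and the thread is unblocked again.
 *
 * @code
 * _Thread_Wait_flags_set(
 *   executing,
 *   THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
 * );
 *
 * // ... release the object lock ...
 *
 * _Thread_Set_state( executing, STATES_WAITING_FOR_EVENT );
 *
 * if (
 *   !_Thread_Wait_flags_try_change(
 *     executing,
 *     THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
 *     THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_BLOCKED
 *   )
 * ) {
 *   _Thread_Unblock( executing );
 * }
 * @endcode
 */
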
/**
 * @brief Sets the thread queue.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_queue The new queue.
 *
 * @see _Thread_Lock_set().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_queue(
  Thread_Control       *the_thread,
  Thread_queue_Control *new_queue
)
{
  the_thread->Wait.queue = new_queue;
}

/**
 * @brief Sets the thread queue operations.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_operations The new queue operations.
 *
 * @see _Thread_Lock_set() and _Thread_Wait_restore_default_operations().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_operations(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *new_operations
)
{
  the_thread->Wait.operations = new_operations;
}

/**
 * @brief Restores the default thread queue operations.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Thread_Wait_set_operations().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default_operations(
  Thread_Control *the_thread
)
{
  the_thread->Wait.operations = &_Thread_queue_Operations_default;
}

/**
 * @brief Sets the thread wait timeout code.
 *
 * @param[in] the_thread The thread.
 * @param[in] timeout_code The new thread wait timeout code.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_timeout_code(
  Thread_Control *the_thread,
  uint32_t        timeout_code
)
{
  the_thread->Wait.timeout_code = timeout_code;
}

/**
 * @brief General purpose thread wait timeout.
 *
 * @param[in] id Unused.
 * @param[in] arg The thread.
 */
void _Thread_Timeout( Objects_Id id, void *arg );

RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
  the_thread->Scheduler.debug_real_cpu = cpu;
#else
  (void) the_thread;
  (void) cpu;
#endif
}

#if !defined(__DYNAMIC_REENT__)
/**
 * This routine returns the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE struct _reent **_Thread_Get_libc_reent( void )
{
  return _Thread_libc_reent;
}

/**
 * This routine sets the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE void _Thread_Set_libc_reent (
  struct _reent **libc_reent
)
{
  _Thread_libc_reent = libc_reent;
}
#endif

/** @}*/

#ifdef __cplusplus
}
#endif

#if defined(RTEMS_MULTIPROCESSING)
#include <rtems/score/threadmp.h>
#endif

#endif
/* end of include file */