source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ edcf89b

Last change on this file since edcf89b was edcf89b, checked in by Sebastian Huber <sebastian.huber@…>, on 03/27/15 at 20:08:21

rtems: Atomically suspend/resume tasks

/**
 * @file
 *
 * @brief Inlined Routines from the Thread Handler
 *
 * This file contains the macro implementation of the inlined
 * routines from the Thread handler.
 */

/*
 *  COPYRIGHT (c) 1989-2008.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2014 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_THREADIMPL_H
#define _RTEMS_SCORE_THREADIMPL_H

#include <rtems/score/thread.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/interr.h>
#include <rtems/score/isr.h>
#include <rtems/score/objectimpl.h>
#include <rtems/score/resourceimpl.h>
#include <rtems/score/statesimpl.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/todimpl.h>
#include <rtems/config.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreThread
 */
/**@{**/

/**
 *  Status which indicates that a proxy is blocking while it waits
 *  for a resource.
 */
#define THREAD_STATUS_PROXY_BLOCKING 0x1111111

/**
 *  Self for the GNU Ada Run-Time
 */
SCORE_EXTERN void *rtems_ada_self;

/**
 *  The following defines the information control block used to
 *  manage this class of objects.
 */
SCORE_EXTERN Objects_Information _Thread_Internal_information;

/**
 *  The following points to the thread whose floating point
 *  context is currently loaded.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
SCORE_EXTERN Thread_Control *_Thread_Allocated_fp;
#endif

#if !defined(__DYNAMIC_REENT__)
/**
 * The C library re-entrant global pointer. Some C library implementations
 * such as newlib have a single global pointer that is changed during a
 * context switch. This pointer points to that global pointer. The thread
 * control block holds a pointer to the task specific data.
 */
SCORE_EXTERN struct _reent **_Thread_libc_reent;
#endif

#define THREAD_RBTREE_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, RBNode )

#if defined(RTEMS_SMP)
#define THREAD_RESOURCE_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node )
#endif

/**
 *  @brief Initialize thread handler.
 *
 *  This routine performs the initialization necessary for this handler.
 */
void _Thread_Handler_initialization(void);

/**
 *  @brief Create idle thread.
 *
 *  This routine creates the idle thread.
 *
 *  @warning No thread should be created before this one.
 */
void _Thread_Create_idle(void);

/**
 *  @brief Start thread multitasking.
 *
 *  This routine initiates multitasking.  It is invoked only as
 *  part of initialization and its invocation is the last act of
 *  the non-multitasking part of the system initialization.
 */
void _Thread_Start_multitasking( void ) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

/**
 *  @brief Allocate the requested stack space for the thread.
 *
 *  Allocate the requested stack space for the thread.
 *  Set the Start.stack field to the address of the stack.
 *
 *  @param[in] the_thread is the thread where the stack space is requested
 *  @param[in] stack_size is the size of the stack space requested
 *
 *  @retval actual size allocated after any adjustment
 *  @retval zero if the allocation failed
 */
size_t _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  size_t          stack_size
);

/**
 *  @brief Deallocate thread stack.
 *
 *  Deallocate the Thread's stack.
 */
void _Thread_Stack_Free(
  Thread_Control *the_thread
);

/**
 *  @brief Initialize thread.
 *
 *  This routine initializes the specified thread.  It allocates
 *  all memory associated with this thread.  It completes by adding
 *  the thread to the local object table so operations on this
 *  thread id are allowed.
 *
 *  @note If stack_area is NULL, it is allocated from the workspace.
 *
 *  @note If the stack is allocated from the workspace, then it is
 *        guaranteed to be of at least minimum size.
 */
bool _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  const struct Scheduler_Control       *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
);

/**
 *  @brief Initializes a thread and makes it ready to execute.
 *
 *  This routine initializes the executable information for a thread
 *  and makes it ready to execute.  After this routine executes, the
 *  thread competes with all other threads for CPU time.
 *
 *  @param the_thread is the thread to be initialized
 *  @param the_prototype
 *  @param entry_point
 *  @param pointer_argument
 *  @param numeric_argument
 *  @param[in,out] cpu The processor in case the function is used to start
 *  an idle thread during system initialization.  Must be set to @c NULL to
 *  start a normal thread.
 */
bool _Thread_Start(
  Thread_Control            *the_thread,
  Thread_Start_types         the_prototype,
  void                      *entry_point,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument,
  Per_CPU_Control           *cpu
);

bool _Thread_Restart(
  Thread_Control            *the_thread,
  Thread_Control            *executing,
  void                      *pointer_argument,
  Thread_Entry_numeric_type  numeric_argument
);

void _Thread_Yield( Thread_Control *executing );

bool _Thread_Set_life_protection( bool protect );

void _Thread_Life_action_handler(
  Thread_Control  *executing,
  Thread_Action   *action,
  Per_CPU_Control *cpu,
  ISR_Level        level
);

/**
 * @brief Kills all zombie threads in the system.
 *
 * Threads change into the zombie state as the last step in the thread
 * termination sequence right before a context switch to the heir thread is
 * initiated.  Since the thread stack is still in use during this phase we have
 * to postpone the thread stack reclamation until this point.  On SMP
 * configurations we may have to busy wait for context switch completion here.
 */
void _Thread_Kill_zombies( void );

/**
 * @brief Closes the thread.
 *
 * Closes the thread object and starts the thread termination sequence.  In
 * case the executing thread is not terminated, then this function waits
 * until the terminating thread has reached the zombie state.
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );

/**
 *  @brief Removes any set states for @a the_thread.
 *
 *  This routine removes any set states for @a the_thread.  It performs
 *  any necessary scheduling operations including the selection of
 *  a new heir thread.
 *
 *  - INTERRUPT LATENCY:
 *    + ready chain
 *    + select heir
 */
void _Thread_Ready(
  Thread_Control *the_thread
);

/**
 * @brief Clears the specified thread state.
 *
 * In case the previous state is a non-ready state and the next state is the
 * ready state, then the thread is unblocked by the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] state The state to clear.  It must not be zero.
 *
 * @return The previous state.
 */
States_Control _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 * @brief Sets the specified thread state.
 *
 * In case the previous state is the ready state, then the thread is blocked by
 * the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] state The state to set.  It must not be zero.
 *
 * @return The previous state.
 */
States_Control _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 *  @brief Initializes the environment for a thread.
 *
 *  This routine initializes the context of @a the_thread to its
 *  appropriate starting state.
 *
 *  @param[in] the_thread is the pointer to the thread control block.
 */
void _Thread_Load_environment(
  Thread_Control *the_thread
);

/**
 *  @brief Wrapper function for all threads.
 *
 *  This routine is the wrapper function for all threads.  It is
 *  the starting point for all threads.  The user provided thread
 *  entry point is invoked by this routine.  Operations
 *  which must be performed immediately before and after the user's
 *  thread executes are found here.
 *
 *  @note On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 */
void _Thread_Handler( void );

/**
 * @brief Executes the global constructors and then restarts itself as the
 * first initialization thread.
 *
 * The first initialization thread is the first RTEMS initialization task or
 * the first POSIX initialization thread in case no RTEMS initialization tasks
 * are present.
 */
void *_Thread_Global_construction( void );

/**
 *  @brief Ends the delay of a thread.
 *
 *  This routine is invoked when a thread must be unblocked at the
 *  end of a time based delay (i.e. wake after or wake when).
 *  It is called by the watchdog handler.
 *
 *  @param[in] id is the thread id
 *  @param[in] ignored is not used
 */
void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
);

/**
 *  @brief Change the priority of a thread.
 *
 *  This routine changes the current priority of @a the_thread to
 *  @a new_priority.  It performs any necessary scheduling operations
 *  including the selection of a new heir thread.
 *
 *  @param[in] the_thread is the thread to change
 *  @param[in] new_priority is the priority to set @a the_thread to
 *  @param[in] prepend_it is a switch to prepend the thread
 */
void _Thread_Change_priority (
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
);

/**
 *  @brief Set thread priority.
 *
 *  This routine updates the priority related fields in the_thread
 *  control block to indicate the current priority is now new_priority.
 */
void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
);

/**
 *  @brief Maps thread Id to a TCB pointer.
 *
 *  This function maps thread IDs to thread control
 *  blocks.  If ID corresponds to a local thread, then it
 *  returns the thread control pointer which maps to ID
 *  and @a location is set to OBJECTS_LOCAL.  If the thread ID is
 *  global and resides on a remote node, then location is set
 *  to OBJECTS_REMOTE, and the_thread is undefined.
 *  Otherwise, location is set to OBJECTS_ERROR and
 *  the_thread is undefined.
 *
 *  @param[in] id is the id of the thread.
 *  @param[out] location is set to the location of the thread.
 *
 *  @note  The performance of many RTEMS services depends upon
 *         the quick execution of the "good object" path in this
 *         routine.  If there is a possibility of saving a few
 *         cycles off the execution time, this routine is worth
 *         further optimization attention.
 */
Thread_Control *_Thread_Get (
  Objects_Id         id,
  Objects_Locations *location
);

/**
 * @brief Acquires a thread by its identifier.
 *
 * @see _Objects_Acquire().
 */
Thread_Control *_Thread_Acquire(
  Objects_Id         id,
  Objects_Locations *location,
  ISR_lock_Context  *lock_context
);

/**
 * @brief Acquires the executing thread.
 *
 * @see _Objects_Acquire().
 */
Thread_Control *_Thread_Acquire_executing( ISR_lock_Context *lock_context );

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}

/**
 * This function returns true if the_thread is the currently executing
 * thread, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Executing );
}

#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread currently executes on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif

/**
 * @brief Returns @a true and sets time_of_context_switch to the
 * time of the last context switch when the thread is currently executing
 * in the system, otherwise @a false.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Get_time_of_last_context_switch(
  Thread_Control    *the_thread,
  Timestamp_Control *time_of_context_switch
)
{
  bool retval = false;

  _Thread_Disable_dispatch();
  #ifndef RTEMS_SMP
    if ( _Thread_Executing->Object.id == the_thread->Object.id ) {
      *time_of_context_switch = _Thread_Time_of_last_context_switch;
      retval = true;
    }
  #else
    if ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
      *time_of_context_switch =
        _Thread_Get_CPU( the_thread )->time_of_last_context_switch;
      retval = true;
    }
  #endif
  _Thread_Enable_dispatch();
  return retval;
}


/**
 * This function returns true if the_thread is the heir
 * thread, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Heir );
}

/**
 * This routine clears any blocking state for the_thread.  It performs
 * any necessary scheduling operations including the selection of
 * a new heir thread.
 */

RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}

/**
 * This routine resets the current context of the calling thread
 * to that of its initial state.
 */

RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  _Giant_Release( _Per_CPU_Get() );

  _ISR_Disable_without_giant( level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif

  _CPU_Context_Restart_self( &executing->Registers );
}

/**
 * This function returns true if the floating point context of
 * the_thread is currently loaded in the floating point unit, and
 * false otherwise.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif

/*
 *  If the CPU has hardware floating point, then we must address saving
 *  and restoring it as part of the context switch.
 *
 *  The second conditional compilation section selects the algorithm used
 *  to context switch between floating point tasks.  The deferred algorithm
 *  can be significantly better in a system with few floating point tasks
 *  because it reduces the total number of save and restore FP context
 *  operations.  However, this algorithm can not be used on all CPUs due
 *  to unpredictable use of FP registers by some compilers for integer
 *  operations.
 */

RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}

RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}

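/*
 * Worked example for the deferred algorithm above, assuming
 * CPU_USE_DEFERRED_FP_SWITCH == TRUE and two tasks A (uses the FP unit)
 * and B (integer only):
 *
 *   A -> B: _Thread_Save_fp( A ) does nothing since the save is deferred,
 *           and _Thread_Restore_fp( B ) does nothing since B has no FP
 *           context.
 *   B -> A: _Thread_Restore_fp( A ) finds _Thread_Is_allocated_fp( A )
 *           true, thus the FP unit still holds A's context and no FP save
 *           or restore operation is performed.
 *
 * A non-deferred switch would have performed one FP save and one FP
 * restore over the same sequence.
 */
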
/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif

/**
 * This function returns true if a thread dispatch is necessary, and false
 * otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}

/**
 * This function returns true if the_thread is NULL and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
  const Thread_Control *the_thread
)
{
  return ( the_thread == NULL );
}

/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the status code is the status which
 * indicates that a proxy is blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}

RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
{
  /* Idle threads */
  uint32_t maximum_internal_threads =
    rtems_configuration_get_maximum_processors();

  /* MPCI thread */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    ++maximum_internal_threads;
  }
#endif

  return maximum_internal_threads;
}

RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information );
}

/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * The thread dispatch necessary indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Thread_Dispatch_update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  cpu_self->dispatch_necessary = false;

#if defined( RTEMS_SMP )
  /*
   * It is critical that we first update the dispatch necessary and then
   * read the heir so that we don't miss an update by
   * _Thread_Dispatch_update_heir().
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
#endif

  heir = cpu_self->heir;
  cpu_self->executing = heir;

  return heir;
}

#if defined( RTEMS_SMP )
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and then the dispatch
   * necessary so that _Thread_Get_heir_and_make_it_executing() cannot miss an
   * update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}
#endif

RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
  Thread_Control *executing,
  Timestamp_Control *time_of_last_context_switch
)
{
  Timestamp_Control uptime;
  Timestamp_Control ran;

  _TOD_Get_uptime( &uptime );
  _Timestamp_Subtract(
    time_of_last_context_switch,
    &uptime,
    &ran
  );
  *time_of_last_context_switch = uptime;
  _Timestamp_Add_to( &executing->cpu_time_used, &ran );
}

RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}

RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  action->handler = handler;
  _Chain_Set_off_chain( &action->Node );
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *
  _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level )
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Per_CPU_Get();
  _Per_CPU_Acquire( cpu );

  return cpu;
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire(
  Thread_Control *thread,
  ISR_Level      *level
)
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Thread_Get_CPU( thread );
  _Per_CPU_Acquire( cpu );

  return cpu;
}

RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
  Per_CPU_Control *cpu,
  ISR_Level level
)
{
  _Per_CPU_Release_and_ISR_enable( cpu, level );
}

RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control *thread,
  Thread_Action  *action
)
{
  Per_CPU_Control *cpu_of_thread;
  ISR_Level        level;

  cpu_of_thread = _Thread_Action_ISR_disable_and_acquire( thread, &level );
  cpu_of_thread->dispatch_necessary = true;

#if defined(RTEMS_SMP)
  if ( _Per_CPU_Get() != cpu_of_thread ) {
    _Per_CPU_Send_interrupt( cpu_of_thread );
  }
#endif

  _Chain_Append_if_is_off_chain_unprotected(
    &thread->Post_switch_actions.Chain,
    &action->Node
  );

  _Thread_Action_release_and_ISR_enable( cpu_of_thread, level );
}

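/*
 * A minimal usage sketch of the thread action API above.  All _Example_*
 * names are hypothetical.  The handler receives the per-CPU control and
 * ISR level of the acquire performed by the action processing; here it is
 * assumed the handler releases them with
 * _Thread_Action_release_and_ISR_enable() before doing its work, as
 * _Thread_Life_action_handler() does.
 */
static Thread_Action _Example_action;

static void _Example_action_handler(
  Thread_Control  *executing,
  Thread_Action   *action,
  Per_CPU_Control *cpu,
  ISR_Level        level
)
{
  (void) executing;
  (void) action;

  _Thread_Action_release_and_ISR_enable( cpu, level );

  /* The deferred work for the thread goes here. */
}

static void _Example_request_post_switch_work( Thread_Control *thread )
{
  _Thread_Action_initialize( &_Example_action, _Example_action_handler );
  _Thread_Add_post_switch_action( thread, &_Example_action );
}
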
RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_PROTECTED ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING_TERMINATING ) != 0;
}

/**
 * @brief Returns true if the thread owns resources, and false otherwise.
 *
 * Resources are accounted with the Thread_Control::resource_count resource
 * counter.  This counter is used by semaphore objects for example.
 *
 * In addition to the resource counter there is a resource dependency tree
 * available on SMP configurations.  In case this tree is non-empty, then the
 * thread owns resources.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
  const Thread_Control *the_thread
)
{
  bool owns_resources = the_thread->resource_count != 0;

#if defined(RTEMS_SMP)
  owns_resources = owns_resources
    || _Resource_Node_owns_resources( &the_thread->Resource_node );
#endif

  return owns_resources;
}

/**
 * @brief Release the thread lock.
 *
 * @param[in] lock The lock returned by _Thread_Lock_acquire().
 * @param[in] lock_context The lock context used for _Thread_Lock_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release(
  ISR_lock_Control *lock,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_Release_and_ISR_enable( lock, lock_context );
}

/**
 * @brief Acquires the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for _Thread_Lock_release().
 *
 * @return The lock required by _Thread_Lock_release().
 */
RTEMS_INLINE_ROUTINE ISR_lock_Control *_Thread_Lock_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  ISR_lock_Control *lock;

  while ( true ) {
    uint32_t my_generation;

    _ISR_Disable_without_giant( lock_context->Lock_context.isr_level );
    my_generation = the_thread->Lock.generation;

    /*
     * Ensure that we read the initial lock generation before we obtain our
     * current lock.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQUIRE );

    lock = the_thread->Lock.current;
    _ISR_lock_Acquire( lock, lock_context );

    /*
     * Ensure that we read the second lock generation after we obtained our
     * current lock.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQUIRE );

    if ( the_thread->Lock.generation == my_generation ) {
      break;
    }

    _Thread_Lock_release( lock, lock_context );
  }

  return lock;
#else
  _ISR_Disable( lock_context->isr_level );

  return NULL;
#endif
}

#if defined(RTEMS_SMP)
/*
 * Internal function, use _Thread_Lock_set() or _Thread_Lock_restore_default()
 * instead.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected(
  Thread_Control   *the_thread,
  ISR_lock_Control *new_lock
)
{
  the_thread->Lock.current = new_lock;

  /*
   * Ensure that the new lock is visible before we update the generation
   * number.  Otherwise someone would be able to read an up to date generation
   * number and an old lock.
   */
  _Atomic_Fence( ATOMIC_ORDER_RELEASE );

  /*
   * Since we set a new lock right before, this increment is not protected by a
   * lock and thus must be an atomic operation.
   */
  _Atomic_Fetch_add_uint(
    &the_thread->Lock.generation,
    1,
    ATOMIC_ORDER_RELAXED
  );
}
#endif

/**
 * @brief Sets a new thread lock.
 *
 * The caller must not be the owner of the default thread lock.  The caller
 * must be the owner of the new lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_lock The new thread lock.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_set(
  Thread_Control   *the_thread,
  ISR_lock_Control *new_lock
)
{
  ISR_lock_Control *lock;
  ISR_lock_Context  lock_context;

  lock = _Thread_Lock_acquire( the_thread, &lock_context );
  _Thread_Lock_set_unprotected( the_thread, new_lock );
  _Thread_Lock_release( lock, &lock_context );
}
#else
#define _Thread_Lock_set( the_thread, new_lock ) \
  do { } while ( 0 )
#endif

/**
 * @brief Restores the default thread lock.
 *
 * The caller must be the owner of the current thread lock.
 *
 * @param[in] the_thread The thread.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_restore_default(
  Thread_Control *the_thread
)
{
  _Atomic_Fence( ATOMIC_ORDER_RELEASE );

  _Thread_Lock_set_unprotected( the_thread, &the_thread->Lock.Default );
}
#else
#define _Thread_Lock_restore_default( the_thread ) \
  do { } while ( 0 )
#endif

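/*
 * Sketch of the lock replacement protocol implemented above (SMP only).
 * Suppose processor B replaces the lock of thread T via _Thread_Lock_set()
 * while processor A concurrently executes _Thread_Lock_acquire( T ):
 *
 *   A: reads generation g and T->Lock.current, then acquires that lock.
 *   B: sets T->Lock.current to the new lock and increments the generation
 *      to g + 1.
 *   A: re-reads the generation, sees g + 1 != g, releases the now stale
 *      lock and retries until it acquires the current lock under an
 *      unchanged generation.
 */
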
void _Thread_Priority_change_do_nothing(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  void             *context
);

/**
 * @brief Sets the thread priority change handler and its context.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_handler The new handler.
 * @param[in] new_context The new handler context.
 */
RTEMS_INLINE_ROUTINE void _Thread_Priority_set_change_handler(
  Thread_Control                 *the_thread,
  Thread_Priority_change_handler  new_handler,
  void                           *new_context
)
{
  the_thread->Priority.change_handler = new_handler;
  the_thread->Priority.change_handler_context = new_context;
}

/**
 * @brief Restores the thread priority change default handler and its context.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Priority_restore_default_change_handler(
  Thread_Control *the_thread
)
{
  the_thread->Priority.change_handler = _Thread_Priority_change_do_nothing;
}

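/*
 * A minimal usage sketch of the priority change handler hooks above.  The
 * _Example_* names and the surrounding protocol are hypothetical; a
 * resource may install such a handler to observe priority changes of an
 * owner thread.
 */
static void _Example_priority_change_handler(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  void             *context
)
{
  (void) the_thread;
  (void) new_priority;
  (void) context;
  /* React to the priority change of the_thread here. */
}

static void _Example_observe_priority( Thread_Control *the_thread, void *arg )
{
  _Thread_Priority_set_change_handler(
    the_thread,
    _Example_priority_change_handler,
    arg
  );
  /* ... later, stop observing ... */
  _Thread_Priority_restore_default_change_handler( the_thread );
}
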
/**
 * @brief The initial thread wait flags value set by _Thread_Initialize().
 */
#define THREAD_WAIT_FLAGS_INITIAL 0x0U

/**
 * @brief Mask to get the thread wait state flags.
 */
#define THREAD_WAIT_STATE_MASK 0xffU

/**
 * @brief Indicates that the thread begins with the blocking operation.
 *
 * A blocking operation consists of an optional watchdog initialization and the
 * setting of the appropriate thread blocking state with the corresponding
 * scheduler block operation.
 */
#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U

/**
 * @brief Indicates that the thread completed the blocking operation.
 */
#define THREAD_WAIT_STATE_BLOCKED 0x2U

/**
 * @brief Indicates that the thread progress condition is satisfied and it is
 * ready to resume execution.
 */
#define THREAD_WAIT_STATE_SATISFIED 0x4U

/**
 * @brief Indicates that a timeout occurred and the thread is ready to resume
 * execution.
 */
#define THREAD_WAIT_STATE_TIMEOUT 0x8U

/**
 * @brief Indicates that the thread progress condition was satisfied during the
 * blocking operation and it is ready to resume execution.
 */
#define THREAD_WAIT_STATE_INTERRUPT_SATISFIED 0x10U

/**
 * @brief Indicates that a timeout occurred during the blocking operation and
 * the thread is ready to resume execution.
 */
#define THREAD_WAIT_STATE_INTERRUPT_TIMEOUT 0x20U

/**
 * @brief Mask to get the thread wait class flags.
 */
#define THREAD_WAIT_CLASS_MASK 0xff00U

/**
 * @brief Indicates that the thread waits for an event.
 */
#define THREAD_WAIT_CLASS_EVENT 0x100U

/**
 * @brief Indicates that the thread waits for a system event.
 */
#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U

/**
 * @brief Indicates that the thread waits for an object.
 */
#define THREAD_WAIT_CLASS_OBJECT 0x400U

RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}

RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}

/**
 * @brief Tries to change the thread wait flags inside a critical section
 * (interrupts disabled).
 *
 * In case the wait flags are equal to the expected wait flags, then the wait
 * flags are set to the desired wait flags.
 *
 * @param[in] the_thread The thread.
 * @param[in] expected_flags The expected wait flags.
 * @param[in] desired_flags The desired wait flags.
 *
 * @retval true The wait flags were equal to the expected wait flags.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_critical(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_RELAXED,
    ATOMIC_ORDER_RELAXED
  );
#else
  bool success = the_thread->Wait.flags == expected_flags;

  if ( success ) {
    the_thread->Wait.flags = desired_flags;
  }

  return success;
#endif
}

/**
 * @brief Tries to change the thread wait flags.
 *
 * @see _Thread_Wait_flags_try_change_critical().
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  bool success;
#if !defined(RTEMS_SMP)
  ISR_Level level;

  _ISR_Disable_without_giant( level );
#endif

  success = _Thread_Wait_flags_try_change_critical(
    the_thread,
    expected_flags,
    desired_flags
  );

#if !defined(RTEMS_SMP)
  _ISR_Enable_without_giant( level );
#endif

  return success;
}

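/*
 * A minimal sketch of the intend-to-block protocol built on the wait flag
 * operations above.  The function and its event wait class choice are
 * hypothetical and not a verbatim copy of an RTEMS service.
 */
static bool _Example_Prepare_to_block( Thread_Control *executing )
{
  bool must_block;

  /* Announce the intention to block before checking the wait condition. */
  _Thread_Wait_flags_set(
    executing,
    THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
  );

  /* ... check the wait condition and start an optional watchdog here ... */

  /*
   * Try to complete the blocking operation.  This fails in case an
   * interrupt satisfied the condition in the meantime and set for example
   * THREAD_WAIT_STATE_INTERRUPT_SATISFIED, in which case the thread must
   * not block.
   */
  must_block = _Thread_Wait_flags_try_change(
    executing,
    THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
    THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_BLOCKED
  );

  return must_block;
}
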
RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
  the_thread->Scheduler.debug_real_cpu = cpu;
#else
  (void) the_thread;
  (void) cpu;
#endif
}

#if !defined(__DYNAMIC_REENT__)
/**
 * This routine returns the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE struct _reent **_Thread_Get_libc_reent( void )
{
  return _Thread_libc_reent;
}

/**
 * This routine sets the C library re-entrant pointer.
 */

RTEMS_INLINE_ROUTINE void _Thread_Set_libc_reent (
  struct _reent **libc_reent
)
{
  _Thread_libc_reent = libc_reent;
}
#endif

/** @}*/

#ifdef __cplusplus
}
#endif

#if defined(RTEMS_MULTIPROCESSING)
#include <rtems/score/threadmp.h>
#endif

#endif
/* end of include file */