source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ ccd5434

Last change on this file since ccd5434 was ccd5434, checked in by Sebastian Huber <sebastian.huber@…>, on 01/07/16 at 08:55:45

score: Introduce Thread_Entry_information

This avoids potential dead code in _Thread_Handler(). It gets rid of
the dangerous function pointer casts.

Update #2514.

/**
 * @file
 *
 * @brief Inlined Routines from the Thread Handler
 *
 * This file contains the macro implementation of the inlined
 * routines from the Thread handler.
 */

/*
 *  COPYRIGHT (c) 1989-2008.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2014-2015 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_THREADIMPL_H
#define _RTEMS_SCORE_THREADIMPL_H

#include <rtems/score/thread.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/interr.h>
#include <rtems/score/isr.h>
#include <rtems/score/objectimpl.h>
#include <rtems/score/resourceimpl.h>
#include <rtems/score/statesimpl.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/freechain.h>
#include <rtems/config.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreThread
 */
/**@{**/

/**
 *  The following constant defines the status code used to manage
 *  a thread while it is waiting for a resource.
 */
#define THREAD_STATUS_PROXY_BLOCKING 0x1111111

/**
 *  Self for the GNU Ada Run-Time
 */
SCORE_EXTERN void *rtems_ada_self;

typedef struct {
  Objects_Information Objects;

  Freechain_Control Free_thread_queue_heads;
} Thread_Information;

/**
 *  The following defines the information control block used to
 *  manage this class of objects.
 */
SCORE_EXTERN Thread_Information _Thread_Internal_information;

/**
 *  The following points to the thread whose floating point
 *  context is currently loaded.
 */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
SCORE_EXTERN Thread_Control *_Thread_Allocated_fp;
#endif

#define THREAD_CHAIN_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.Chain )

#define THREAD_RBTREE_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.RBTree )

#if defined(RTEMS_SMP)
#define THREAD_RESOURCE_NODE_TO_THREAD( node ) \
  RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node )
#endif

void _Thread_Initialize_information(
  Thread_Information  *information,
  Objects_APIs         the_api,
  uint16_t             the_class,
  uint32_t             maximum,
  bool                 is_string,
  uint32_t             maximum_name_length
#if defined(RTEMS_MULTIPROCESSING)
  ,
  bool                 supports_global
#endif
);

/**
 *  @brief Initialize thread handler.
 *
 *  This routine performs the initialization necessary for this handler.
 */
void _Thread_Handler_initialization(void);

/**
 *  @brief Create idle thread.
 *
 *  This routine creates the idle thread.
 *
 *  @warning No thread should be created before this one.
 */
void _Thread_Create_idle(void);

/**
 *  @brief Start thread multitasking.
 *
 *  This routine initiates multitasking.  It is invoked only as
 *  part of initialization and its invocation is the last act of
 *  the non-multitasking part of the system initialization.
 */
void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;

/**
 *  @brief Allocate the requested stack space for the thread.
 *
 *  Allocate the requested stack space for the thread.
 *  Set the Start.stack field to the address of the stack.
 *
 *  @param[in] the_thread is the thread where the stack space is requested
 *  @param[in] stack_size is the amount of stack space requested
 *
 *  @retval actual size allocated after any adjustment
 *  @retval zero if the allocation failed
 */
size_t _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  size_t          stack_size
);

/**
 *  @brief Deallocate thread stack.
 *
 *  Deallocate the Thread's stack.
 */
void _Thread_Stack_Free(
  Thread_Control *the_thread
);

/**
 *  @brief Initialize thread.
 *
 *  This routine initializes the specified thread.  It allocates
 *  all memory associated with this thread.  It completes by adding
 *  the thread to the local object table so operations on this
 *  thread id are allowed.
 *
 *  @note If stack_area is NULL, it is allocated from the workspace.
 *
 *  @note If the stack is allocated from the workspace, then it is
 *        guaranteed to be of at least minimum size.
 */
bool _Thread_Initialize(
  Thread_Information                   *information,
  Thread_Control                       *the_thread,
  const struct Scheduler_Control       *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
);

/**
 *  @brief Initializes thread and executes it.
 *
 *  This routine initializes the executable information for a thread
 *  and makes it ready to execute.  After this routine executes, the
 *  thread competes with all other threads for CPU time.
 *
 *  @param the_thread The thread to be started.
 *  @param entry The thread entry information.
 *  @param[in,out] cpu The processor if used to start an idle thread
 *  during system initialization.  Must be set to @c NULL to start a normal
 *  thread.
 */
bool _Thread_Start(
  Thread_Control                 *the_thread,
  const Thread_Entry_information *entry,
  Per_CPU_Control                *cpu
);

bool _Thread_Restart(
  Thread_Control                 *the_thread,
  Thread_Control                 *executing,
  const Thread_Entry_information *entry
);

void _Thread_Yield( Thread_Control *executing );

bool _Thread_Set_life_protection( bool protect );

void _Thread_Life_action_handler(
  Thread_Control  *executing,
  Thread_Action   *action,
  Per_CPU_Control *cpu,
  ISR_Level        level
);

/**
 * @brief Kills all zombie threads in the system.
 *
 * Threads change into the zombie state as the last step in the thread
 * termination sequence right before a context switch to the heir thread is
 * initiated.  Since the thread stack is still in use during this phase we have
 * to postpone the thread stack reclamation until this point.  On SMP
 * configurations we may have to busy wait for context switch completion here.
 */
void _Thread_Kill_zombies( void );

/**
 * @brief Closes the thread.
 *
 * Closes the thread object and starts the thread termination sequence.  In
 * case the executing thread is not terminated, then this function waits until
 * the terminating thread reaches the zombie state.
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );

/**
 * @brief Clears the specified thread state.
 *
 * In case the previous state is a non-ready state and the next state is the
 * ready state, then the thread is unblocked by the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] state The state to clear.  It must not be zero.
 *
 * @return The previous state.
 */
States_Control _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
);

/**
 * @brief Sets the specified thread state.
 *
 * In case the previous state is the ready state, then the thread is blocked by
 * the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] state The state to set.  It must not be zero.
 *
 * @return The previous state.
 */
States_Control _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control  state
);
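
/*
 * A minimal usage sketch (illustrative, not part of this file): a service
 * may block the executing thread on a state and later make it ready again:
 *
 *   _Thread_Set_state( executing, STATES_WAITING_FOR_EVENT );
 *   ... the thread stays blocked until the state is cleared ...
 *   _Thread_Clear_state( the_thread, STATES_WAITING_FOR_EVENT );
 */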

/**
 * @brief Clears all thread states.
 *
 * In case the previous state is a non-ready state, then the thread is
 * unblocked by the scheduler.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Ready(
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_ALL_SET );
}

/**
 *  @brief Initializes the environment for a thread.
 *
 *  This routine initializes the context of @a the_thread to its
 *  appropriate starting state.
 *
 *  @param[in] the_thread is the pointer to the thread control block.
 */
void _Thread_Load_environment(
  Thread_Control *the_thread
);

void _Thread_Entry_adaptor_idle( Thread_Control *executing );

void _Thread_Entry_adaptor_numeric( Thread_Control *executing );

void _Thread_Entry_adaptor_pointer( Thread_Control *executing );

/**
 *  @brief Wrapper function for all threads.
 *
 *  This routine is the wrapper function for all threads.  It is
 *  the starting point for all threads.  The user provided thread
 *  entry point is invoked by this routine.  Operations
 *  which must be performed immediately before and after the user's
 *  thread executes are found here.
 *
 *  @note On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 */
void _Thread_Handler( void );

/**
 * @brief Executes the global constructors and then restarts itself as the
 * first initialization thread.
 *
 * The first initialization thread is the first RTEMS initialization task or
 * the first POSIX initialization thread in case no RTEMS initialization tasks
 * are present.
 */
void _Thread_Global_construction(
  Thread_Control                 *executing,
  const Thread_Entry_information *entry
) RTEMS_NO_RETURN;

/**
 *  @brief Ends the delay of a thread.
 *
 *  This routine is invoked when a thread must be unblocked at the
 *  end of a time based delay (i.e. wake after or wake when).
 *  It is called by the watchdog handler.
 *
 *  @param[in] id is the thread id
 *  @param[in] ignored is not used
 */
void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
);

/**
 * @brief Returns true if the left thread priority is less than the right
 * thread priority in the intuitive sense of priority and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
  Priority_Control left,
  Priority_Control right
)
{
  return left > right;
}

/**
 * @brief Returns the highest priority of the left and right thread priorities
 * in the intuitive sense of priority.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
  Priority_Control left,
  Priority_Control right
)
{
  return _Thread_Priority_less_than( left, right ) ? right : left;
}
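
/*
 * An illustrative note (not part of the original file): RTEMS priority
 * values are inverted with respect to importance, so a numerically lower
 * value is the higher priority.  For example:
 *
 *   _Thread_Priority_less_than( 2, 1 )   evaluates to true, and
 *   _Thread_Priority_highest( 1, 2 )     evaluates to 1.
 */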

/**
 * @brief Filters a thread priority change.
 *
 * Called by _Thread_Change_priority() under the protection of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in, out] new_priority The new priority of the thread.  The filter may
 * alter this value.
 * @param[in] arg The argument passed to _Thread_Change_priority().
 *
 * @retval true Change the current priority.
 * @retval false Otherwise.
 */
typedef bool ( *Thread_Change_priority_filter )(
  Thread_Control   *the_thread,
  Priority_Control *new_priority,
  void             *arg
);

/**
 * @brief Changes the priority of a thread if allowed by the filter function.
 *
 * It changes the current priority of the thread to the new priority in case the
 * filter function returns true.  In this case the scheduler is notified of the
 * priority change as well.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 * @param[in] arg The argument for the filter function.
 * @param[in] filter The filter function to determine if a priority change is
 * allowed and optionally perform other actions under the protection of the
 * thread lock simultaneously with the update of the current priority.
 * @param[in] prepend_it In case this is true, then the thread is prepended to
 * its priority group in its scheduler instance, otherwise it is appended.
 */
void _Thread_Change_priority(
  Thread_Control                *the_thread,
  Priority_Control               new_priority,
  void                          *arg,
  Thread_Change_priority_filter  filter,
  bool                           prepend_it
);
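
/*
 * A minimal filter sketch (the name is hypothetical): a filter which only
 * lets a priority raise through could look like this:
 *
 *   static bool _My_raise_filter(
 *     Thread_Control   *the_thread,
 *     Priority_Control *new_priority,
 *     void             *arg
 *   )
 *   {
 *     (void) arg;
 *     return _Thread_Priority_less_than(
 *       the_thread->current_priority,
 *       *new_priority
 *     );
 *   }
 *
 *   _Thread_Change_priority( the_thread, prio, NULL, _My_raise_filter, false );
 */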

/**
 * @brief Raises the priority of a thread.
 *
 * It changes the current priority of the thread to the new priority if the new
 * priority is higher than the current priority.  In this case the thread is
 * appended to its new priority group in its scheduler instance.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 *
 * @see _Thread_Change_priority().
 */
void _Thread_Raise_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
);

/**
 * @brief Inherits the priority of a thread.
 *
 * It changes the current priority of the inheritor thread to the current priority
 * of the ancestor thread if it is higher than the current priority of the inheritor
 * thread.  In this case the inheritor thread is appended to its new priority group
 * in its scheduler instance.
 *
 * On SMP configurations, the priority is changed to PRIORITY_PSEUDO_ISR in
 * case the own schedulers of the inheritor and ancestor thread differ (priority
 * boosting).
 *
 * @param[in] inheritor The thread to inherit the priority.
 * @param[in] ancestor The thread to bequeath its priority to the inheritor
 *   thread.
 */
#if defined(RTEMS_SMP)
void _Thread_Inherit_priority(
  Thread_Control *inheritor,
  Thread_Control *ancestor
);
#else
RTEMS_INLINE_ROUTINE void _Thread_Inherit_priority(
  Thread_Control *inheritor,
  Thread_Control *ancestor
)
{
  _Thread_Raise_priority( inheritor, ancestor->current_priority );
}
#endif

/**
 * @brief Sets the current priority to the real priority of a thread.
 *
 * Sets the priority restore hint to false.
 */
void _Thread_Restore_priority( Thread_Control *the_thread );

/**
 * @brief Sets the priority of a thread.
 *
 * It sets the real priority of the thread.  In addition it changes the current
 * priority of the thread if the new priority is higher than the current
 * priority or the thread owns no resources.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 * @param[out] old_priority The old real priority of the thread.  This pointer
 * must not be @c NULL.
 * @param[in] prepend_it In case this is true, then the thread is prepended to
 * its priority group in its scheduler instance, otherwise it is appended.
 *
 * @see _Thread_Change_priority().
 */
void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  Priority_Control *old_priority,
  bool              prepend_it
);

/**
 *  @brief Maps thread Id to a TCB pointer.
 *
 *  This function maps thread IDs to thread control
 *  blocks.  If ID corresponds to a local thread, then it
 *  returns the_thread control pointer which maps to ID
 *  and @a location is set to OBJECTS_LOCAL.  If the thread ID is
 *  global and resides on a remote node, then location is set
 *  to OBJECTS_REMOTE, and the_thread is undefined.
 *  Otherwise, location is set to OBJECTS_ERROR and
 *  the_thread is undefined.
 *
 *  @param[in] id is the id of the thread.
 *  @param[out] location is set to the location of the thread control block.
 *
 *  @note  The performance of many RTEMS services depends upon
 *         the quick execution of the "good object" path in this
 *         routine.  If there is a possibility of saving a few
 *         cycles off the execution time, this routine is worth
 *         further optimization attention.
 */
Thread_Control *_Thread_Get (
  Objects_Id         id,
  Objects_Locations *location
);
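
/*
 * A typical lookup sketch (illustrative; on the local path the lookup
 * leaves thread dispatching disabled, hence the _Objects_Put()):
 *
 *   Thread_Control    *the_thread;
 *   Objects_Locations  location;
 *
 *   the_thread = _Thread_Get( id, &location );
 *   switch ( location ) {
 *     case OBJECTS_LOCAL:
 *       ... operate on the_thread ...
 *       _Objects_Put( &the_thread->Object );
 *       break;
 *     default:
 *       ... the id was invalid or maps to a remote thread ...
 *       break;
 *   }
 */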

/**
 * @brief Gets a thread by its identifier.
 *
 * @see _Objects_Get_isr_disable().
 */
Thread_Control *_Thread_Get_interrupt_disable(
  Objects_Id         id,
  Objects_Locations *location,
  ISR_lock_Context  *lock_context
);

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->Scheduler.cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}

RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->Scheduler.cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}

/**
 * This function returns true if the_thread is the currently executing
 * thread, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Executing );
}

#if defined(RTEMS_SMP)
/**
 * @brief Returns @a true in case the thread executes currently on some
 * processor in the system, otherwise @a false.
 *
 * Do not confuse this with _Thread_Is_executing() which checks only the
 * current processor.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
  const Thread_Control *the_thread
)
{
  return _CPU_Context_Get_is_executing( &the_thread->Registers );
}
#endif

/**
 * @brief Returns @a true and sets time_of_context_switch to the
 * time of the last context switch when the thread is currently executing
 * in the system, otherwise @a false.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Get_time_of_last_context_switch(
  Thread_Control    *the_thread,
  Timestamp_Control *time_of_context_switch
)
{
  bool retval = false;

  _Thread_Disable_dispatch();
  #ifndef RTEMS_SMP
    if ( _Thread_Executing->Object.id == the_thread->Object.id ) {
      *time_of_context_switch = _Thread_Time_of_last_context_switch;
      retval = true;
    }
  #else
    if ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
      *time_of_context_switch =
        _Thread_Get_CPU( the_thread )->time_of_last_context_switch;
      retval = true;
    }
  #endif
  _Thread_Enable_dispatch();
  return retval;
}


/**
 * This function returns true if the_thread is the heir
 * thread, and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Heir );
}

/**
 * This routine clears any blocking state for the_thread.  It performs
 * any necessary scheduling operations including the selection of
 * a new heir thread.
 */

RTEMS_INLINE_ROUTINE void _Thread_Unblock (
  Thread_Control *the_thread
)
{
  _Thread_Clear_state( the_thread, STATES_BLOCKED );
}

/**
 * This routine resets the current context of the calling thread
 * to that of its initial state.
 */

RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  _Giant_Release( _Per_CPU_Get() );

  _ISR_Disable_without_giant( level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif

  _CPU_Context_Restart_self( &executing->Registers );
}

/**
 * This function returns true if the floating point context of
 * the_thread is currently loaded in the floating point unit, and
 * false otherwise.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
  const Thread_Control *the_thread
)
{
  return ( the_thread == _Thread_Allocated_fp );
}
#endif

/*
 *  If the CPU has hardware floating point, then we must address saving
 *  and restoring it as part of the context switch.
 *
 *  The second conditional compilation section selects the algorithm used
 *  to context switch between floating point tasks.  The deferred algorithm
 *  can be significantly better in a system with few floating point tasks
 *  because it reduces the total number of save and restore FP context
 *  operations.  However, this algorithm can not be used on all CPUs due
 *  to unpredictable use of FP registers by some compilers for integer
 *  operations.
 */

RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
  if ( executing->fp_context != NULL )
    _Context_Save_fp( &executing->fp_context );
#endif
#endif
}

RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
{
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Context_Restore_fp( &executing->fp_context );
    _Thread_Allocated_fp = executing;
  }
#else
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif
#endif
}
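
/*
 * A simplified dispatch-time sketch (illustrative; the real sequence lives
 * in _Thread_Dispatch()):
 *
 *   _Thread_Save_fp( executing );
 *   _Context_Switch( &executing->Registers, &heir->Registers );
 *   _Thread_Restore_fp( executing );
 *
 * With the deferred algorithm _Thread_Save_fp() is a no-op and the FP
 * context moves lazily in _Thread_Restore_fp() once a different FP thread
 * actually runs.
 */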

/**
 * This routine is invoked when the currently loaded floating
 * point context is no longer associated with an active thread.
 */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
{
  _Thread_Allocated_fp = NULL;
}
#endif

/**
 * This function returns true if a thread dispatch is necessary, and false
 * otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
{
  return ( _Thread_Dispatch_necessary );
}

/**
 * This function returns true if the_thread is NULL and false otherwise.
 */

RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
  const Thread_Control *the_thread
)
{
  return ( the_thread == NULL );
}

/**
 * @brief Is proxy blocking.
 *
 * This function returns true if the status code indicates that a proxy is
 * blocking, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
  uint32_t   code
)
{
  return (code == THREAD_STATUS_PROXY_BLOCKING);
}

RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
{
  /* Idle threads */
  uint32_t maximum_internal_threads =
    rtems_configuration_get_maximum_processors();

  /* MPCI thread */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _System_state_Is_multiprocessing ) {
    ++maximum_internal_threads;
  }
#endif

  return maximum_internal_threads;
}

RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
{
  return (Thread_Control *)
    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
}

/**
 * @brief Gets the heir of the processor and makes it executing.
 *
 * Must be called with interrupts disabled.  The thread dispatch necessary
 * indicator is cleared as a side-effect.
 *
 * @return The heir thread.
 *
 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
 * _Thread_Dispatch_update_heir().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
  Per_CPU_Control *cpu_self
)
{
  Thread_Control *heir;

  heir = cpu_self->heir;
  cpu_self->dispatch_necessary = false;
  cpu_self->executing = heir;

  return heir;
}

#if defined( RTEMS_SMP )
RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  cpu_for_heir->heir = heir;

  if ( cpu_for_heir == cpu_self ) {
    cpu_self->dispatch_necessary = true;
  } else {
    _Per_CPU_Send_interrupt( cpu_for_heir );
  }
}
#endif

RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
  Thread_Control *executing,
  Timestamp_Control *time_of_last_context_switch
)
{
  Timestamp_Control uptime;
  Timestamp_Control ran;

  _TOD_Get_uptime( &uptime );
  _Timestamp_Subtract(
    time_of_last_context_switch,
    &uptime,
    &ran
  );
  *time_of_last_context_switch = uptime;
  _Timestamp_Add_to( &executing->cpu_time_used, &ran );
}

RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
  Thread_Action_control *action_control
)
{
  _Chain_Initialize_empty( &action_control->Chain );
}

RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
  Thread_Action *action
)
{
  _Chain_Set_off_chain( &action->Node );
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *
  _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level )
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Per_CPU_Get();
  _Per_CPU_Acquire( cpu );

  return cpu;
}

RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire(
  Thread_Control *thread,
  ISR_Level      *level
)
{
  Per_CPU_Control *cpu;

  _ISR_Disable_without_giant( *level );
  cpu = _Thread_Get_CPU( thread );
  _Per_CPU_Acquire( cpu );

  return cpu;
}

RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
  Per_CPU_Control *cpu,
  ISR_Level level
)
{
  _Per_CPU_Release_and_ISR_enable( cpu, level );
}

RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
  Thread_Control        *thread,
  Thread_Action         *action,
  Thread_Action_handler  handler
)
{
  Per_CPU_Control *cpu_of_thread;
  ISR_Level        level;

  cpu_of_thread = _Thread_Action_ISR_disable_and_acquire( thread, &level );

  action->handler = handler;

#if defined(RTEMS_SMP)
  if ( _Per_CPU_Get() == cpu_of_thread ) {
    cpu_of_thread->dispatch_necessary = true;
  } else {
    _Per_CPU_Send_interrupt( cpu_of_thread );
  }
#else
  cpu_of_thread->dispatch_necessary = true;
#endif

  _Chain_Append_if_is_off_chain_unprotected(
    &thread->Post_switch_actions.Chain,
    &action->Node
  );

  _Thread_Action_release_and_ISR_enable( cpu_of_thread, level );
}
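
/*
 * A minimal usage sketch (the handler name is hypothetical): queue an
 * action which runs with interrupts disabled right before the thread is
 * dispatched on its processor:
 *
 *   static void _My_action_handler(
 *     Thread_Control  *executing,
 *     Thread_Action   *action,
 *     Per_CPU_Control *cpu,
 *     ISR_Level        level
 *   )
 *   {
 *     ... invoked during thread dispatch on the thread's processor ...
 *   }
 *
 *   _Thread_Action_initialize( &action );
 *   _Thread_Add_post_switch_action( the_thread, &action, _My_action_handler );
 */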

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_PROTECTED ) != 0;
}

RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
  Thread_Life_state life_state
)
{
  return ( life_state & THREAD_LIFE_RESTARTING_TERMINATING ) != 0;
}

/**
 * @brief Returns true if the thread owns resources, and false otherwise.
 *
 * Resources are accounted with the Thread_Control::resource_count resource
 * counter.  This counter is used by semaphore objects for example.
 *
 * In addition to the resource counter there is a resource dependency tree
 * available on SMP configurations.  In case this tree is non-empty, then the
 * thread owns resources.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
  const Thread_Control *the_thread
)
{
  bool owns_resources = the_thread->resource_count != 0;

#if defined(RTEMS_SMP)
  owns_resources = owns_resources
    || _Resource_Node_owns_resources( &the_thread->Resource_node );
#endif

  return owns_resources;
}

/**
 * @brief Acquires the default thread lock inside a critical section
 * (interrupts disabled).
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Assert( _ISR_Get_level() != 0 );
#if defined(RTEMS_SMP)
  _SMP_ticket_lock_Acquire(
    &the_thread->Lock.Default,
    &_Thread_Executing->Lock.Stats,
    &lock_context->Lock_context.Stats_context
  );
#else
  (void) the_thread;
  (void) lock_context;
#endif
}

/**
 * @brief Acquires the default thread lock and returns the executing thread.
 *
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @return The executing thread.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Lock_acquire_default_for_executing(
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_Lock_acquire_default_critical( executing, lock_context );

  return executing;
}

/**
 * @brief Acquires the default thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * release.
 *
 * @see _Thread_Lock_release_default().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _ISR_lock_ISR_disable( lock_context );
  _Thread_Lock_acquire_default_critical( the_thread, lock_context );
}

/**
 * @brief Releases the thread lock inside a critical section (interrupts
 * disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] lock The lock.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_critical(
  void             *lock,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  _SMP_ticket_lock_Release(
    lock,
    &lock_context->Lock_context.Stats_context
  );
#else
  (void) lock;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the thread lock.
 *
 * @param[in] lock The lock returned by _Thread_Lock_acquire().
 * @param[in] lock_context The lock context used for _Thread_Lock_acquire().
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release(
  void             *lock,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_critical( lock, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}

/**
 * @brief Releases the default thread lock inside a critical section
 * (interrupts disabled).
 *
 * The previous interrupt status is not restored.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default_critical(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_critical(
#if defined(RTEMS_SMP)
    &the_thread->Lock.Default,
#else
    NULL,
#endif
    lock_context
  );
}

/**
 * @brief Releases the default thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context used for the corresponding lock
 * acquire.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  _Thread_Lock_release_default_critical( the_thread, lock_context );
  _ISR_lock_ISR_enable( lock_context );
}

/**
 * @brief Acquires the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] lock_context The lock context for _Thread_Lock_release().
 *
 * @return The lock required by _Thread_Lock_release().
 */
RTEMS_INLINE_ROUTINE void *_Thread_Lock_acquire(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
#if defined(RTEMS_SMP)
  SMP_ticket_lock_Control *lock;

  while ( true ) {
    unsigned int first_generation;
    unsigned int second_generation;

    _ISR_lock_ISR_disable( lock_context );

    /*
     * Ensure that we read our first lock generation before we obtain our
     * current lock.  See _Thread_Lock_set_unprotected().
     */
    first_generation = _Atomic_Load_uint(
      &the_thread->Lock.generation,
      ATOMIC_ORDER_ACQUIRE
    );

    lock = the_thread->Lock.current;
    _SMP_ticket_lock_Acquire(
      lock,
      &_Thread_Executing->Lock.Stats,
      &lock_context->Lock_context.Stats_context
    );

    /*
     * The C11 memory model doesn't guarantee that we read the latest
     * generation here.  For this a read-modify-write operation would be
     * necessary.  We read at least the new generation set up by the owner of
     * our current thread lock, and so on.
     */
    second_generation = _Atomic_Load_uint(
      &the_thread->Lock.generation,
      ATOMIC_ORDER_ACQUIRE
    );

    if ( first_generation == second_generation ) {
      return lock;
    }

    _Thread_Lock_release( lock, lock_context );
  }
#else
  _ISR_Disable( lock_context->isr_level );

  return NULL;
#endif
}
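
/*
 * A minimal usage sketch: each acquire must be paired with a release which
 * receives the lock pointer returned by the acquire, since the lock may be
 * exchanged concurrently via _Thread_Lock_set():
 *
 *   ISR_lock_Context  lock_context;
 *   void             *lock;
 *
 *   lock = _Thread_Lock_acquire( the_thread, &lock_context );
 *   ... inspect or modify the thread wait state ...
 *   _Thread_Lock_release( lock, &lock_context );
 */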

#if defined(RTEMS_SMP)
/*
 * Internal function, use _Thread_Lock_set() or _Thread_Lock_restore_default()
 * instead.
 */
RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected(
  Thread_Control          *the_thread,
  SMP_ticket_lock_Control *new_lock
)
{
  the_thread->Lock.current = new_lock;

  /*
   * The generation release corresponds to the generation acquire in
   * _Thread_Lock_acquire() and ensures that the new lock and other fields are
   * visible to the next thread lock owner.  Otherwise someone would be able to
   * read an up to date generation number and an old lock.  See
   * _Thread_Wait_set_queue() and _Thread_Wait_restore_default_operations().
   *
   * Since we set a new lock right before, this increment is not protected by a
   * lock and thus must be an atomic operation.
   */
  _Atomic_Fetch_add_uint(
    &the_thread->Lock.generation,
    1,
    ATOMIC_ORDER_RELEASE
  );
}
#endif

/**
 * @brief Sets a new thread lock.
 *
 * The caller must not be the owner of the default thread lock.  The caller
 * must be the owner of the new lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_lock The new thread lock.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_set(
  Thread_Control          *the_thread,
  SMP_ticket_lock_Control *new_lock
)
{
  ISR_lock_Context lock_context;

  _Thread_Lock_acquire_default_critical( the_thread, &lock_context );
  _Assert( the_thread->Lock.current == &the_thread->Lock.Default );
  _Thread_Lock_set_unprotected( the_thread, new_lock );
  _Thread_Lock_release_default_critical( the_thread, &lock_context );
}
#else
#define _Thread_Lock_set( the_thread, new_lock ) \
  do { } while ( 0 )
#endif

/**
 * @brief Restores the default thread lock.
 *
 * The caller must be the owner of the current thread lock.
 *
 * @param[in] the_thread The thread.
 */
#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE void _Thread_Lock_restore_default(
  Thread_Control *the_thread
)
{
  _Thread_Lock_set_unprotected( the_thread, &the_thread->Lock.Default );
}
#else
#define _Thread_Lock_restore_default( the_thread ) \
  do { } while ( 0 )
#endif
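
/*
 * A typical protocol sketch (illustrative; the_queue stands for a thread
 * queue with an SMP ticket lock): a thread queue hands its own lock to the
 * thread for the duration of a blocking operation and restores the default
 * lock once the thread is extracted:
 *
 *   _Thread_Lock_set( the_thread, &the_queue->Lock );
 *   ... enqueue the thread and block ...
 *   _Thread_Lock_restore_default( the_thread );
 */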

/**
 * @brief The initial thread wait flags value set by _Thread_Initialize().
 */
#define THREAD_WAIT_FLAGS_INITIAL 0x0U

/**
 * @brief Mask to get the thread wait state flags.
 */
#define THREAD_WAIT_STATE_MASK 0xffU

/**
 * @brief Indicates that the thread begins with the blocking operation.
 *
 * A blocking operation consists of an optional watchdog initialization and the
 * setting of the appropriate thread blocking state with the corresponding
 * scheduler block operation.
 */
#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U

/**
 * @brief Indicates that the thread completed the blocking operation.
 */
#define THREAD_WAIT_STATE_BLOCKED 0x2U

/**
 * @brief Indicates that a condition to end the thread wait occurred.
 *
 * This could be a timeout, a signal, an event or a resource availability.
 */
#define THREAD_WAIT_STATE_READY_AGAIN 0x4U

/**
 * @brief Mask to get the thread wait class flags.
 */
#define THREAD_WAIT_CLASS_MASK 0xff00U

/**
 * @brief Indicates that the thread waits for an event.
 */
#define THREAD_WAIT_CLASS_EVENT 0x100U

/**
 * @brief Indicates that the thread waits for a system event.
 */
#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U

/**
 * @brief Indicates that the thread waits for an object.
 */
#define THREAD_WAIT_CLASS_OBJECT 0x400U

RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
  Thread_Control    *the_thread,
  Thread_Wait_flags  flags
)
{
#if defined(RTEMS_SMP)
  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
#else
  the_thread->Wait.flags = flags;
#endif
}

RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
#else
  return the_thread->Wait.flags;
#endif
}

/**
 * @brief Tries to change the thread wait flags inside a critical section
 * (interrupts disabled).
 *
 * In case the wait flags are equal to the expected wait flags, then the wait
 * flags are set to the desired wait flags.
 *
 * @param[in] the_thread The thread.
 * @param[in] expected_flags The expected wait flags.
 * @param[in] desired_flags The desired wait flags.
 *
 * @retval true The wait flags were equal to the expected wait flags.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_critical(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
#if defined(RTEMS_SMP)
  return _Atomic_Compare_exchange_uint(
    &the_thread->Wait.flags,
    &expected_flags,
    desired_flags,
    ATOMIC_ORDER_RELAXED,
    ATOMIC_ORDER_RELAXED
  );
#else
  bool success = the_thread->Wait.flags == expected_flags;

  if ( success ) {
    the_thread->Wait.flags = desired_flags;
  }

  return success;
#endif
}

/**
 * @brief Tries to change the thread wait flags.
 *
 * @see _Thread_Wait_flags_try_change_critical().
 */
RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change(
  Thread_Control    *the_thread,
  Thread_Wait_flags  expected_flags,
  Thread_Wait_flags  desired_flags
)
{
  bool success;
#if !defined(RTEMS_SMP)
  ISR_Level level;

  _ISR_Disable_without_giant( level );
#endif

  success = _Thread_Wait_flags_try_change_critical(
    the_thread,
    expected_flags,
    desired_flags
  );

#if !defined(RTEMS_SMP)
  _ISR_Enable_without_giant( level );
#endif

  return success;
}
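
/*
 * A simplified synchronization sketch (illustrative): the blocking side
 * announces its intent before it really blocks, so a wake-up on another
 * processor can try to catch it in time:
 *
 *   Blocking side:
 *     _Thread_Wait_flags_set(
 *       executing,
 *       THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK
 *     );
 *     ... initiate watchdog and scheduler block operation ...
 *
 *   Wake-up side:
 *     if (
 *       _Thread_Wait_flags_try_change(
 *         the_thread,
 *         THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_INTEND_TO_BLOCK,
 *         THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_READY_AGAIN
 *       )
 *     ) {
 *       ... the pending block operation will be cancelled ...
 *     }
 */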

/**
 * @brief Sets the thread queue.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_queue The new queue.
 *
 * @see _Thread_Lock_set().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_queue(
  Thread_Control     *the_thread,
  Thread_queue_Queue *new_queue
)
{
  the_thread->Wait.queue = new_queue;
}

/**
 * @brief Sets the thread queue operations.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_operations The new queue operations.
 *
 * @see _Thread_Lock_set() and _Thread_Wait_restore_default_operations().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_operations(
  Thread_Control                *the_thread,
  const Thread_queue_Operations *new_operations
)
{
  the_thread->Wait.operations = new_operations;
}

/**
 * @brief Restores the default thread queue operations.
 *
 * The caller must be the owner of the thread lock.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Thread_Wait_set_operations().
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default_operations(
  Thread_Control *the_thread
)
{
  the_thread->Wait.operations = &_Thread_queue_Operations_default;
}

/**
 * @brief Sets the thread wait timeout code.
 *
 * @param[in] the_thread The thread.
 * @param[in] timeout_code The new thread wait timeout code.
 */
RTEMS_INLINE_ROUTINE void _Thread_Wait_set_timeout_code(
  Thread_Control *the_thread,
  uint32_t        timeout_code
)
{
  the_thread->Wait.timeout_code = timeout_code;
}

/**
 * @brief General purpose thread wait timeout.
 *
 * @param[in] id Unused.
 * @param[in] arg The thread.
 */
void _Thread_Timeout( Objects_Id id, void *arg );

RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
  Thread_Control  *the_thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
  the_thread->Scheduler.debug_real_cpu = cpu;
#else
  (void) the_thread;
  (void) cpu;
#endif
}

/** @}*/

#ifdef __cplusplus
}
#endif

#if defined(RTEMS_MULTIPROCESSING)
#include <rtems/score/threadmp.h>
#endif

#endif
/* end of include file */