source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 358bd740

5
Last change on this file since 358bd740 was 358bd740, checked in by Sebastian Huber <sebastian.huber@…>, on 02/03/16 at 11:41:02

score: Avoid SCORE_EXTERN

Delete SCORE_INIT. This finally removes the

some.h:

#ifndef SOME_XYZ_EXTERN
#define SOME_XYZ_EXTERN extern
#endif
SOME_XYZ_EXTERN type xyz;

some_xyz.c:

#define SOME_XYZ_EXTERN
#include <some.h>

pattern in favour of

some.h:

extern type xyz;

some_xyz.c

#include <some.h>
type xyz;

Update #2559.

  • Property mode set to 100644
File size: 38.2 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014-2015 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/assert.h>
26#include <rtems/score/chainimpl.h>
27#include <rtems/score/interr.h>
28#include <rtems/score/isr.h>
29#include <rtems/score/objectimpl.h>
30#include <rtems/score/resourceimpl.h>
31#include <rtems/score/statesimpl.h>
32#include <rtems/score/sysstate.h>
33#include <rtems/score/threadqimpl.h>
34#include <rtems/score/todimpl.h>
35#include <rtems/score/freechain.h>
36#include <rtems/config.h>
37
38#ifdef __cplusplus
39extern "C" {
40#endif
41
42/**
43 * @addtogroup ScoreThread
44 */
45/**@{**/
46
47/**
48 *  The following structure contains the information necessary to manage
49 *  a thread which it is  waiting for a resource.
50 */
51#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
52
53/**
54 *  Self for the GNU Ada Run-Time
55 */
56extern void *rtems_ada_self;
57
58typedef struct {
59  Objects_Information Objects;
60
61  Freechain_Control Free_thread_queue_heads;
62} Thread_Information;
63
64/**
65 *  The following defines the information control block used to
66 *  manage this class of objects.
67 */
68extern Thread_Information _Thread_Internal_information;
69
70/**
71 *  The following points to the thread whose floating point
72 *  context is currently loaded.
73 */
74#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
75extern Thread_Control *_Thread_Allocated_fp;
76#endif
77
78#define THREAD_CHAIN_NODE_TO_THREAD( node ) \
79  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.Chain )
80
81#define THREAD_RBTREE_NODE_TO_THREAD( node ) \
82  RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Node.RBTree )
83
84#if defined(RTEMS_SMP)
85#define THREAD_RESOURCE_NODE_TO_THREAD( node ) \
86  RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node )
87#endif
88
89void _Thread_Initialize_information(
90  Thread_Information  *information,
91  Objects_APIs         the_api,
92  uint16_t             the_class,
93  uint32_t             maximum,
94  bool                 is_string,
95  uint32_t             maximum_name_length
96#if defined(RTEMS_MULTIPROCESSING)
97  ,
98  bool                 supports_global
99#endif
100);
101
102/**
103 *  @brief Initialize thread handler.
104 *
105 *  This routine performs the initialization necessary for this handler.
106 */
107void _Thread_Handler_initialization(void);
108
109/**
110 *  @brief Create idle thread.
111 *
112 *  This routine creates the idle thread.
113 *
114 *  @warning No thread should be created before this one.
115 */
116void _Thread_Create_idle(void);
117
118/**
119 *  @brief Start thread multitasking.
120 *
121 *  This routine initiates multitasking.  It is invoked only as
122 *  part of initialization and its invocation is the last act of
123 *  the non-multitasking part of the system initialization.
124 */
125void _Thread_Start_multitasking( void ) RTEMS_NO_RETURN;
126
127/**
128 *  @brief Allocate the requested stack space for the thread.
129 *
130 *  Allocate the requested stack space for the thread.
131 *  Set the Start.stack field to the address of the stack.
132 *
133 *  @param[in] the_thread is the thread where the stack space is requested
134 *  @param[in] stack_size is the stack space is requested
135 *
136 *  @retval actual size allocated after any adjustment
137 *  @retval zero if the allocation failed
138 */
139size_t _Thread_Stack_Allocate(
140  Thread_Control *the_thread,
141  size_t          stack_size
142);
143
144/**
145 *  @brief Deallocate thread stack.
146 *
147 *  Deallocate the Thread's stack.
148 */
149void _Thread_Stack_Free(
150  Thread_Control *the_thread
151);
152
153/**
154 *  @brief Initialize thread.
155 *
156 *  This routine initializes the specified the thread.  It allocates
157 *  all memory associated with this thread.  It completes by adding
158 *  the thread to the local object table so operations on this
159 *  thread id are allowed.
160 *
161 *  @note If stack_area is NULL, it is allocated from the workspace.
162 *
163 *  @note If the stack is allocated from the workspace, then it is
164 *        guaranteed to be of at least minimum size.
165 */
166bool _Thread_Initialize(
167  Thread_Information                   *information,
168  Thread_Control                       *the_thread,
169  const struct Scheduler_Control       *scheduler,
170  void                                 *stack_area,
171  size_t                                stack_size,
172  bool                                  is_fp,
173  Priority_Control                      priority,
174  bool                                  is_preemptible,
175  Thread_CPU_budget_algorithms          budget_algorithm,
176  Thread_CPU_budget_algorithm_callout   budget_callout,
177  uint32_t                              isr_level,
178  Objects_Name                          name
179);
180
181/**
182 *  @brief Initializes thread and executes it.
183 *
184 *  This routine initializes the executable information for a thread
185 *  and makes it ready to execute.  After this routine executes, the
186 *  thread competes with all other threads for CPU time.
187 *
188 *  @param the_thread The thread to be started.
189 *  @param entry The thread entry information.
190 */
191bool _Thread_Start(
192  Thread_Control                 *the_thread,
193  const Thread_Entry_information *entry
194);
195
196bool _Thread_Restart(
197  Thread_Control                 *the_thread,
198  Thread_Control                 *executing,
199  const Thread_Entry_information *entry
200);
201
202void _Thread_Yield( Thread_Control *executing );
203
204bool _Thread_Set_life_protection( bool protect );
205
206void _Thread_Life_action_handler(
207  Thread_Control  *executing,
208  Thread_Action   *action,
209  Per_CPU_Control *cpu,
210  ISR_Level        level
211);
212
213/**
214 * @brief Kills all zombie threads in the system.
215 *
216 * Threads change into the zombie state as the last step in the thread
217 * termination sequence right before a context switch to the heir thread is
218 * initiated.  Since the thread stack is still in use during this phase we have
219 * to postpone the thread stack reclamation until this point.  On SMP
220 * configurations we may have to busy wait for context switch completion here.
221 */
222void _Thread_Kill_zombies( void );
223
224/**
225 * @brief Closes the thread.
226 *
227 * Closes the thread object and starts the thread termination sequence.  In
228 * case the executing thread is not terminated, then this function waits until
229 * the terminating thread reached the zombie state.
230 */
231void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
232
233/**
234 * @brief Clears the specified thread state.
235 *
236 * In case the previous state is a non-ready state and the next state is the
237 * ready state, then the thread is unblocked by the scheduler.
238 *
239 * @param[in] the_thread The thread.
240 * @param[in] state The state to clear.  It must not be zero.
241 *
242 * @return The previous state.
243 */
244States_Control _Thread_Clear_state(
245  Thread_Control *the_thread,
246  States_Control  state
247);
248
249/**
250 * @brief Sets the specified thread state.
251 *
252 * In case the previous state is the ready state, then the thread is blocked by
253 * the scheduler.
254 *
255 * @param[in] the_thread The thread.
256 * @param[in] state The state to set.  It must not be zero.
257 *
258 * @return The previous state.
259 */
260States_Control _Thread_Set_state(
261  Thread_Control *the_thread,
262  States_Control  state
263);
264
265/**
266 * @brief Clears all thread states.
267 *
268 * In case the previous state is a non-ready state, then the thread is
269 * unblocked by the scheduler.
270 *
271 * @param[in] the_thread The thread.
272 */
273RTEMS_INLINE_ROUTINE void _Thread_Ready(
274  Thread_Control *the_thread
275)
276{
277  _Thread_Clear_state( the_thread, STATES_ALL_SET );
278}
279
280/**
281 *  @brief Initializes enviroment for a thread.
282 *
283 *  This routine initializes the context of @a the_thread to its
284 *  appropriate starting state.
285 *
286 *  @param[in] the_thread is the pointer to the thread control block.
287 */
288void _Thread_Load_environment(
289  Thread_Control *the_thread
290);
291
292void _Thread_Entry_adaptor_idle( Thread_Control *executing );
293
294void _Thread_Entry_adaptor_numeric( Thread_Control *executing );
295
296void _Thread_Entry_adaptor_pointer( Thread_Control *executing );
297
298/**
299 *  @brief Wrapper function for all threads.
300 *
301 *  This routine is the wrapper function for all threads.  It is
302 *  the starting point for all threads.  The user provided thread
303 *  entry point is invoked by this routine.  Operations
304 *  which must be performed immediately before and after the user's
305 *  thread executes are found here.
306 *
307 *  @note On entry, it is assumed all interrupts are blocked and that this
308 *  routine needs to set the initial isr level.  This may or may not
309 *  actually be needed by the context switch routine and as a result
310 *  interrupts may already be at there proper level.  Either way,
311 *  setting the initial isr level properly here is safe.
312 */
313void _Thread_Handler( void );
314
315/**
316 * @brief Executes the global constructors and then restarts itself as the
317 * first initialization thread.
318 *
319 * The first initialization thread is the first RTEMS initialization task or
320 * the first POSIX initialization thread in case no RTEMS initialization tasks
321 * are present.
322 */
323void _Thread_Global_construction(
324  Thread_Control                 *executing,
325  const Thread_Entry_information *entry
326) RTEMS_NO_RETURN;
327
328/**
329 *  @brief Ended the delay of a thread.
330 *
331 *  This routine is invoked when a thread must be unblocked at the
332 *  end of a time based delay (i.e. wake after or wake when).
333 *  It is called by the watchdog handler.
334 *
335 *  @param[in] id is the thread id
336 *  @param[in] ignored is not used
337 */
338void _Thread_Delay_ended(
339  Objects_Id  id,
340  void       *ignored
341);
342
343/**
344 * @brief Returns true if the left thread priority is less than the right
345 * thread priority in the intuitive sense of priority and false otherwise.
346 */
347RTEMS_INLINE_ROUTINE bool _Thread_Priority_less_than(
348  Priority_Control left,
349  Priority_Control right
350)
351{
352  return left > right;
353}
354
355/**
356 * @brief Returns the highest priority of the left and right thread priorities
357 * in the intuitive sense of priority.
358 */
359RTEMS_INLINE_ROUTINE Priority_Control _Thread_Priority_highest(
360  Priority_Control left,
361  Priority_Control right
362)
363{
364  return _Thread_Priority_less_than( left, right ) ? right : left;
365}
366
367/**
368 * @brief Filters a thread priority change.
369 *
370 * Called by _Thread_Change_priority() under the protection of the thread lock.
371 *
372 * @param[in] the_thread The thread.
373 * @param[in, out] new_priority The new priority of the thread.  The filter may
374 * alter this value.
375 * @param[in] arg The argument passed to _Thread_Change_priority().
376 *
377 * @retval true Change the current priority.
378 * @retval false Otherwise.
379 */
380typedef bool ( *Thread_Change_priority_filter )(
381  Thread_Control   *the_thread,
382  Priority_Control *new_priority,
383  void             *arg
384);
385
386/**
387 * @brief Changes the priority of a thread if allowed by the filter function.
388 *
389 * It changes current priority of the thread to the new priority in case the
390 * filter function returns true.  In this case the scheduler is notified of the
391 * priority change as well.
392 *
393 * @param[in] the_thread The thread.
394 * @param[in] new_priority The new priority of the thread.
395 * @param[in] arg The argument for the filter function.
396 * @param[in] filter The filter function to determine if a priority change is
397 * allowed and optionally perform other actions under the protection of the
398 * thread lock simultaneously with the update of the current priority.
399 * @param[in] prepend_it In case this is true, then the thread is prepended to
400 * its priority group in its scheduler instance, otherwise it is appended.
401 */
402void _Thread_Change_priority(
403  Thread_Control                *the_thread,
404  Priority_Control               new_priority,
405  void                          *arg,
406  Thread_Change_priority_filter  filter,
407  bool                           prepend_it
408);
409
410/**
411 * @brief Raises the priority of a thread.
412 *
413 * It changes the current priority of the thread to the new priority if the new
414 * priority is higher than the current priority.  In this case the thread is
415 * appended to its new priority group in its scheduler instance.
416 *
417 * @param[in] the_thread The thread.
418 * @param[in] new_priority The new priority of the thread.
419 *
420 * @see _Thread_Change_priority().
421 */
422void _Thread_Raise_priority(
423  Thread_Control   *the_thread,
424  Priority_Control  new_priority
425);
426
427/**
428 * @brief Inherit the priority of a thread.
429 *
430 * It changes the current priority of the inheritor thread to the current priority
431 * of the ancestor thread if it is higher than the current priority of the inheritor
432 * thread.  In this case the inheritor thread is appended to its new priority group
433 * in its scheduler instance.
434 *
435 * On SMP configurations, the priority is changed to PRIORITY_PSEUDO_ISR in
436 * case the own schedulers of the inheritor and ancestor thread differ (priority
437 * boosting).
438 *
439 * @param[in] inheritor The thread to inherit the priority.
440 * @param[in] ancestor The thread to bequeath its priority to the inheritor
441 *   thread.
442 */
443#if defined(RTEMS_SMP)
444void _Thread_Inherit_priority(
445  Thread_Control *inheritor,
446  Thread_Control *ancestor
447);
448#else
449RTEMS_INLINE_ROUTINE void _Thread_Inherit_priority(
450  Thread_Control *inheritor,
451  Thread_Control *ancestor
452)
453{
454  _Thread_Raise_priority( inheritor, ancestor->current_priority );
455}
456#endif
457
458/**
459 * @brief Sets the current to the real priority of a thread.
460 *
461 * Sets the priority restore hint to false.
462 */
463void _Thread_Restore_priority( Thread_Control *the_thread );
464
465/**
466 * @brief Sets the priority of a thread.
467 *
468 * It sets the real priority of the thread.  In addition it changes the current
469 * priority of the thread if the new priority is higher than the current
470 * priority or the thread owns no resources.
471 *
472 * @param[in] the_thread The thread.
473 * @param[in] new_priority The new priority of the thread.
474 * @param[out] old_priority The old real priority of the thread.  This pointer
475 * must not be @c NULL.
476 * @param[in] prepend_it In case this is true, then the thread is prepended to
477 * its priority group in its scheduler instance, otherwise it is appended.
478 *
479 * @see _Thread_Change_priority().
480 */
481void _Thread_Set_priority(
482  Thread_Control   *the_thread,
483  Priority_Control  new_priority,
484  Priority_Control *old_priority,
485  bool              prepend_it
486);
487
488/**
489 *  @brief Maps thread Id to a TCB pointer.
490 *
491 *  This function maps thread IDs to thread control
492 *  blocks.  If ID corresponds to a local thread, then it
493 *  returns the_thread control pointer which maps to ID
494 *  and @a location is set to OBJECTS_LOCAL.  If the thread ID is
495 *  global and resides on a remote node, then location is set
496 *  to OBJECTS_REMOTE, and the_thread is undefined.
497 *  Otherwise, location is set to OBJECTS_ERROR and
498 *  the_thread is undefined.
499 *
500 *  @param[in] id is the id of the thread.
501 *  @param[in] location is the location of the block.
502 *
503 *  @note  The performance of many RTEMS services depends upon
504 *         the quick execution of the "good object" path in this
505 *         routine.  If there is a possibility of saving a few
506 *         cycles off the execution time, this routine is worth
507 *         further optimization attention.
508 */
509Thread_Control *_Thread_Get (
510  Objects_Id         id,
511  Objects_Locations *location
512);
513
514/**
515 * @brief Gets a thread by its identifier.
516 *
517 * @see _Objects_Get_isr_disable().
518 */
519Thread_Control *_Thread_Get_interrupt_disable(
520  Objects_Id         id,
521  Objects_Locations *location,
522  ISR_lock_Context  *lock_context
523);
524
525RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
526  const Thread_Control *thread
527)
528{
529#if defined(RTEMS_SMP)
530  return thread->Scheduler.cpu;
531#else
532  (void) thread;
533
534  return _Per_CPU_Get();
535#endif
536}
537
538RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
539  Thread_Control *thread,
540  Per_CPU_Control *cpu
541)
542{
543#if defined(RTEMS_SMP)
544  thread->Scheduler.cpu = cpu;
545#else
546  (void) thread;
547  (void) cpu;
548#endif
549}
550
551/**
552 * This function returns true if the_thread is the currently executing
553 * thread, and false otherwise.
554 */
555
556RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
557  const Thread_Control *the_thread
558)
559{
560  return ( the_thread == _Thread_Executing );
561}
562
563#if defined(RTEMS_SMP)
564/**
565 * @brief Returns @a true in case the thread executes currently on some
566 * processor in the system, otherwise @a false.
567 *
568 * Do not confuse this with _Thread_Is_executing() which checks only the
569 * current processor.
570 */
571RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
572  const Thread_Control *the_thread
573)
574{
575  return _CPU_Context_Get_is_executing( &the_thread->Registers );
576}
577#endif
578
579/**
580 * @brief Returns @a true and sets time_of_context_switch to the
581 * time of the last context switch when the thread is currently executing
582 * in the system, otherwise @a false.
583 */
584RTEMS_INLINE_ROUTINE bool _Thread_Get_time_of_last_context_switch(
585  Thread_Control    *the_thread,
586  Timestamp_Control *time_of_context_switch
587)
588{
589  bool retval = false;
590
591  _Thread_Disable_dispatch();
592  #ifndef RTEMS_SMP
593    if ( _Thread_Executing->Object.id == the_thread->Object.id ) {
594      *time_of_context_switch = _Thread_Time_of_last_context_switch;
595      retval = true;
596    }
597  #else
598    if ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
599      *time_of_context_switch =
600        _Thread_Get_CPU( the_thread )->time_of_last_context_switch;
601      retval = true;
602    }
603  #endif
604  _Thread_Enable_dispatch();
605  return retval;
606}
607
608
609/**
610 * This function returns true if the_thread is the heir
611 * thread, and false otherwise.
612 */
613
614RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
615  const Thread_Control *the_thread
616)
617{
618  return ( the_thread == _Thread_Heir );
619}
620
621/**
622 * This routine clears any blocking state for the_thread.  It performs
623 * any necessary scheduling operations including the selection of
624 * a new heir thread.
625 */
626
627RTEMS_INLINE_ROUTINE void _Thread_Unblock (
628  Thread_Control *the_thread
629)
630{
631  _Thread_Clear_state( the_thread, STATES_BLOCKED );
632}
633
634/**
635 * This routine resets the current context of the calling thread
636 * to that of its initial state.
637 */
638
639RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
640{
641#if defined(RTEMS_SMP)
642  ISR_Level level;
643
644  _Giant_Release( _Per_CPU_Get() );
645
646  _ISR_Disable_without_giant( level );
647  ( void ) level;
648#endif
649
650#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
651  if ( executing->fp_context != NULL )
652    _Context_Restore_fp( &executing->fp_context );
653#endif
654
655  _CPU_Context_Restart_self( &executing->Registers );
656}
657
658/**
659 * This function returns true if the floating point context of
660 * the_thread is currently loaded in the floating point unit, and
661 * false otherwise.
662 */
663
664#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
665RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
666  const Thread_Control *the_thread
667)
668{
669  return ( the_thread == _Thread_Allocated_fp );
670}
671#endif
672
673/*
674 *  If the CPU has hardware floating point, then we must address saving
675 *  and restoring it as part of the context switch.
676 *
677 *  The second conditional compilation section selects the algorithm used
678 *  to context switch between floating point tasks.  The deferred algorithm
679 *  can be significantly better in a system with few floating point tasks
680 *  because it reduces the total number of save and restore FP context
681 *  operations.  However, this algorithm can not be used on all CPUs due
682 *  to unpredictable use of FP registers by some compilers for integer
683 *  operations.
684 */
685
686RTEMS_INLINE_ROUTINE void _Thread_Save_fp( Thread_Control *executing )
687{
688#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
689#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
690  if ( executing->fp_context != NULL )
691    _Context_Save_fp( &executing->fp_context );
692#endif
693#endif
694}
695
696RTEMS_INLINE_ROUTINE void _Thread_Restore_fp( Thread_Control *executing )
697{
698#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
699#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
700  if ( (executing->fp_context != NULL) &&
701       !_Thread_Is_allocated_fp( executing ) ) {
702    if ( _Thread_Allocated_fp != NULL )
703      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
704    _Context_Restore_fp( &executing->fp_context );
705    _Thread_Allocated_fp = executing;
706  }
707#else
708  if ( executing->fp_context != NULL )
709    _Context_Restore_fp( &executing->fp_context );
710#endif
711#endif
712}
713
714/**
715 * This routine is invoked when the currently loaded floating
716 * point context is now longer associated with an active thread.
717 */
718
719#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
720RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
721{
722  _Thread_Allocated_fp = NULL;
723}
724#endif
725
726/**
727 * This function returns true if dispatching is disabled, and false
728 * otherwise.
729 */
730
731RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
732{
733  return ( _Thread_Dispatch_necessary );
734}
735
736/**
737 * This function returns true if the_thread is NULL and false otherwise.
738 */
739
740RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
741  const Thread_Control *the_thread
742)
743{
744  return ( the_thread == NULL );
745}
746
747/**
748 * @brief Is proxy blocking.
749 *
750 * status which indicates that a proxy is blocking, and false otherwise.
751 */
752RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
753  uint32_t   code
754)
755{
756  return (code == THREAD_STATUS_PROXY_BLOCKING);
757}
758
759RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
760{
761  /* Idle threads */
762  uint32_t maximum_internal_threads =
763    rtems_configuration_get_maximum_processors();
764
765  /* MPCI thread */
766#if defined(RTEMS_MULTIPROCESSING)
767  if ( _System_state_Is_multiprocessing ) {
768    ++maximum_internal_threads;
769  }
770#endif
771
772  return maximum_internal_threads;
773}
774
775RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
776{
777  return (Thread_Control *)
778    _Objects_Allocate_unprotected( &_Thread_Internal_information.Objects );
779}
780
781/**
782 * @brief Gets the heir of the processor and makes it executing.
783 *
784 * Must be called with interrupts disabled.  The thread dispatch necessary
785 * indicator is cleared as a side-effect.
786 *
787 * @return The heir thread.
788 *
789 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
790 * _Thread_Dispatch_update_heir().
791 */
792RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
793  Per_CPU_Control *cpu_self
794)
795{
796  Thread_Control *heir;
797
798  heir = cpu_self->heir;
799  cpu_self->dispatch_necessary = false;
800  cpu_self->executing = heir;
801
802  return heir;
803}
804
805#if defined( RTEMS_SMP )
806RTEMS_INLINE_ROUTINE void _Thread_Dispatch_update_heir(
807  Per_CPU_Control *cpu_self,
808  Per_CPU_Control *cpu_for_heir,
809  Thread_Control  *heir
810)
811{
812  cpu_for_heir->heir = heir;
813
814  if ( cpu_for_heir == cpu_self ) {
815    cpu_self->dispatch_necessary = true;
816  } else {
817    _Per_CPU_Send_interrupt( cpu_for_heir );
818  }
819}
820#endif
821
822RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
823  Thread_Control *executing,
824  Timestamp_Control *time_of_last_context_switch
825)
826{
827  Timestamp_Control uptime;
828  Timestamp_Control ran;
829
830  _TOD_Get_uptime( &uptime );
831  _Timestamp_Subtract(
832    time_of_last_context_switch,
833    &uptime,
834    &ran
835  );
836  *time_of_last_context_switch = uptime;
837  _Timestamp_Add_to( &executing->cpu_time_used, &ran );
838}
839
840RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
841  Thread_Action_control *action_control
842)
843{
844  _Chain_Initialize_empty( &action_control->Chain );
845}
846
847RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
848  Thread_Action *action
849)
850{
851  _Chain_Set_off_chain( &action->Node );
852}
853
854RTEMS_INLINE_ROUTINE Per_CPU_Control *
855  _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level )
856{
857  Per_CPU_Control *cpu;
858
859  _ISR_Disable_without_giant( *level );
860  cpu = _Per_CPU_Get();
861  _Per_CPU_Acquire( cpu );
862
863  return cpu;
864}
865
866RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire(
867  Thread_Control *thread,
868  ISR_Level      *level
869)
870{
871  Per_CPU_Control *cpu;
872
873  _ISR_Disable_without_giant( *level );
874  cpu = _Thread_Get_CPU( thread );
875  _Per_CPU_Acquire( cpu );
876
877  return cpu;
878}
879
880RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
881  Per_CPU_Control *cpu,
882  ISR_Level level
883)
884{
885  _Per_CPU_Release_and_ISR_enable( cpu, level );
886}
887
888RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
889  Thread_Control        *thread,
890  Thread_Action         *action,
891  Thread_Action_handler  handler
892)
893{
894  Per_CPU_Control *cpu_of_thread;
895  ISR_Level        level;
896
897  cpu_of_thread = _Thread_Action_ISR_disable_and_acquire( thread, &level );
898
899  action->handler = handler;
900
901#if defined(RTEMS_SMP)
902  if ( _Per_CPU_Get() == cpu_of_thread ) {
903    cpu_of_thread->dispatch_necessary = true;
904  } else {
905    _Per_CPU_Send_interrupt( cpu_of_thread );
906  }
907#else
908  cpu_of_thread->dispatch_necessary = true;
909#endif
910
911  _Chain_Append_if_is_off_chain_unprotected(
912    &thread->Post_switch_actions.Chain,
913    &action->Node
914  );
915
916  _Thread_Action_release_and_ISR_enable( cpu_of_thread, level );
917}
918
919RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
920  Thread_Life_state life_state
921)
922{
923  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
924}
925
926RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
927  Thread_Life_state life_state
928)
929{
930  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
931}
932
933RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected(
934  Thread_Life_state life_state
935)
936{
937  return ( life_state & THREAD_LIFE_PROTECTED ) != 0;
938}
939
940RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
941  Thread_Life_state life_state
942)
943{
944  return ( life_state & THREAD_LIFE_RESTARTING_TERMINATING ) != 0;
945}
946
947/**
948 * @brief Returns true if the thread owns resources, and false otherwise.
949 *
950 * Resources are accounted with the Thread_Control::resource_count resource
951 * counter.  This counter is used by semaphore objects for example.
952 *
953 * In addition to the resource counter there is a resource dependency tree
954 * available on SMP configurations.  In case this tree is non-empty, then the
955 * thread owns resources.
956 *
957 * @param[in] the_thread The thread.
958 */
959RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
960  const Thread_Control *the_thread
961)
962{
963  bool owns_resources = the_thread->resource_count != 0;
964
965#if defined(RTEMS_SMP)
966  owns_resources = owns_resources
967    || _Resource_Node_owns_resources( &the_thread->Resource_node );
968#endif
969
970  return owns_resources;
971}
972
973/**
974 * @brief Acquires the default thread lock inside a critical section
975 * (interrupts disabled).
976 *
977 * @param[in] the_thread The thread.
978 * @param[in] lock_context The lock context used for the corresponding lock
979 * release.
980 *
981 * @see _Thread_Lock_release_default().
982 */
983RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default_critical(
984  Thread_Control   *the_thread,
985  ISR_lock_Context *lock_context
986)
987{
988  _Assert( _ISR_Get_level() != 0 );
989#if defined(RTEMS_SMP)
990  _SMP_ticket_lock_Acquire(
991    &the_thread->Lock.Default,
992    &_Thread_Executing->Lock.Stats,
993    &lock_context->Lock_context.Stats_context
994  );
995#else
996  (void) the_thread;
997  (void) lock_context;
998#endif
999}
1000
1001/**
1002 * @brief Acquires the default thread lock and returns the executing thread.
1003 *
1004 * @param[in] lock_context The lock context used for the corresponding lock
1005 * release.
1006 *
1007 * @return The executing thread.
1008 *
1009 * @see _Thread_Lock_release_default().
1010 */
1011RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Lock_acquire_default_for_executing(
1012  ISR_lock_Context *lock_context
1013)
1014{
1015  Thread_Control *executing;
1016
1017  _ISR_lock_ISR_disable( lock_context );
1018  executing = _Thread_Executing;
1019  _Thread_Lock_acquire_default_critical( executing, lock_context );
1020
1021  return executing;
1022}
1023
1024/**
1025 * @brief Acquires the default thread lock.
1026 *
1027 * @param[in] the_thread The thread.
1028 * @param[in] lock_context The lock context used for the corresponding lock
1029 * release.
1030 *
1031 * @see _Thread_Lock_release_default().
1032 */
1033RTEMS_INLINE_ROUTINE void _Thread_Lock_acquire_default(
1034  Thread_Control   *the_thread,
1035  ISR_lock_Context *lock_context
1036)
1037{
1038  _ISR_lock_ISR_disable( lock_context );
1039  _Thread_Lock_acquire_default_critical( the_thread, lock_context );
1040}
1041
1042/**
1043 * @brief Releases the thread lock inside a critical section (interrupts
1044 * disabled).
1045 *
1046 * The previous interrupt status is not restored.
1047 *
1048 * @param[in] lock The lock.
1049 * @param[in] lock_context The lock context used for the corresponding lock
1050 * acquire.
1051 */
1052RTEMS_INLINE_ROUTINE void _Thread_Lock_release_critical(
1053  void             *lock,
1054  ISR_lock_Context *lock_context
1055)
1056{
1057#if defined(RTEMS_SMP)
1058  _SMP_ticket_lock_Release(
1059    lock,
1060    &lock_context->Lock_context.Stats_context
1061  );
1062#else
1063  (void) lock;
1064  (void) lock_context;
1065#endif
1066}
1067
1068/**
1069 * @brief Releases the thread lock.
1070 *
1071 * @param[in] lock The lock returned by _Thread_Lock_acquire().
1072 * @param[in] lock_context The lock context used for _Thread_Lock_acquire().
1073 */
1074RTEMS_INLINE_ROUTINE void _Thread_Lock_release(
1075  void             *lock,
1076  ISR_lock_Context *lock_context
1077)
1078{
1079  _Thread_Lock_release_critical( lock, lock_context );
1080  _ISR_lock_ISR_enable( lock_context );
1081}
1082
1083/**
1084 * @brief Releases the default thread lock inside a critical section
1085 * (interrupts disabled).
1086 *
1087 * The previous interrupt status is not restored.
1088 *
1089 * @param[in] the_thread The thread.
1090 * @param[in] lock_context The lock context used for the corresponding lock
1091 * acquire.
1092 */
1093RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default_critical(
1094  Thread_Control   *the_thread,
1095  ISR_lock_Context *lock_context
1096)
1097{
1098  _Thread_Lock_release_critical(
1099#if defined(RTEMS_SMP)
1100    &the_thread->Lock.Default,
1101#else
1102    NULL,
1103#endif
1104    lock_context
1105  );
1106}
1107
1108/**
1109 * @brief Releases the default thread lock.
1110 *
1111 * @param[in] the_thread The thread.
1112 * @param[in] lock_context The lock context used for the corresponding lock
1113 * acquire.
1114 */
1115RTEMS_INLINE_ROUTINE void _Thread_Lock_release_default(
1116  Thread_Control   *the_thread,
1117  ISR_lock_Context *lock_context
1118)
1119{
1120  _Thread_Lock_release_default_critical( the_thread, lock_context );
1121  _ISR_lock_ISR_enable( lock_context );
1122}
1123
1124/**
1125 * @brief Acquires the thread lock.
1126 *
1127 * @param[in] the_thread The thread.
1128 * @param[in] lock_context The lock context for _Thread_Lock_release().
1129 *
1130 * @return The lock required by _Thread_Lock_release().
1131 */
1132RTEMS_INLINE_ROUTINE void *_Thread_Lock_acquire(
1133  Thread_Control   *the_thread,
1134  ISR_lock_Context *lock_context
1135)
1136{
1137#if defined(RTEMS_SMP)
1138  SMP_ticket_lock_Control *lock;
1139
1140  while ( true ) {
1141    unsigned int first_generation;
1142    unsigned int second_generation;
1143
1144    _ISR_lock_ISR_disable( lock_context );
1145
1146    /*
1147     * Ensure that we read our first lock generation before we obtain our
1148     * current lock.  See _Thread_Lock_set_unprotected().
1149     */
1150    first_generation = _Atomic_Load_uint(
1151      &the_thread->Lock.generation,
1152      ATOMIC_ORDER_ACQUIRE
1153    );
1154
1155    lock = the_thread->Lock.current;
1156    _SMP_ticket_lock_Acquire(
1157      lock,
1158      &_Thread_Executing->Lock.Stats,
1159      &lock_context->Lock_context.Stats_context
1160    );
1161
1162    /*
1163     * The C11 memory model doesn't guarantee that we read the latest
1164     * generation here.  For this a read-modify-write operation would be
1165     * necessary.  We read at least the new generation set up by the owner of
1166     * our current thread lock, and so on.
1167     */
1168    second_generation = _Atomic_Load_uint(
1169      &the_thread->Lock.generation,
1170      ATOMIC_ORDER_ACQUIRE
1171    );
1172
1173    if ( first_generation == second_generation ) {
1174      return lock;
1175    }
1176
1177    _Thread_Lock_release( lock, lock_context );
1178  }
1179#else
1180  _ISR_Disable( lock_context->isr_level );
1181
1182  return NULL;
1183#endif
1184}
1185
1186#if defined(RTEMS_SMP)
1187/*
1188 * Internal function, use _Thread_Lock_set() or _Thread_Lock_restore_default()
1189 * instead.
1190 */
1191RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected(
1192  Thread_Control          *the_thread,
1193  SMP_ticket_lock_Control *new_lock
1194)
1195{
1196  the_thread->Lock.current = new_lock;
1197
1198  /*
1199   * The generation release corresponds to the generation acquire in
1200   * _Thread_Lock_acquire() and ensures that the new lock and other fields are
1201   * visible to the next thread lock owner.  Otherwise someone would be able to
1202   * read an up to date generation number and an old lock.  See
1203   * _Thread_Wait_set_queue() and _Thread_Wait_restore_default_operations().
1204   *
1205   * Since we set a new lock right before, this increment is not protected by a
1206   * lock and thus must be an atomic operation.
1207   */
1208  _Atomic_Fetch_add_uint(
1209    &the_thread->Lock.generation,
1210    1,
1211    ATOMIC_ORDER_RELEASE
1212  );
1213}
1214#endif
1215
1216/**
1217 * @brief Sets a new thread lock.
1218 *
1219 * The caller must not be the owner of the default thread lock.  The caller
1220 * must be the owner of the new lock.
1221 *
1222 * @param[in] the_thread The thread.
1223 * @param[in] new_lock The new thread lock.
1224 */
1225#if defined(RTEMS_SMP)
1226RTEMS_INLINE_ROUTINE void _Thread_Lock_set(
1227  Thread_Control          *the_thread,
1228  SMP_ticket_lock_Control *new_lock
1229)
1230{
1231  ISR_lock_Context lock_context;
1232
1233  _Thread_Lock_acquire_default_critical( the_thread, &lock_context );
1234  _Assert( the_thread->Lock.current == &the_thread->Lock.Default );
1235  _Thread_Lock_set_unprotected( the_thread, new_lock );
1236  _Thread_Lock_release_default_critical( the_thread, &lock_context );
1237}
1238#else
1239#define _Thread_Lock_set( the_thread, new_lock ) \
1240  do { } while ( 0 )
1241#endif
1242
1243/**
1244 * @brief Restores the default thread lock.
1245 *
1246 * The caller must be the owner of the current thread lock.
1247 *
1248 * @param[in] the_thread The thread.
1249 */
1250#if defined(RTEMS_SMP)
1251RTEMS_INLINE_ROUTINE void _Thread_Lock_restore_default(
1252  Thread_Control *the_thread
1253)
1254{
1255  _Thread_Lock_set_unprotected( the_thread, &the_thread->Lock.Default );
1256}
1257#else
1258#define _Thread_Lock_restore_default( the_thread ) \
1259  do { } while ( 0 )
1260#endif
1261
1262/**
1263 * @brief The initial thread wait flags value set by _Thread_Initialize().
1264 */
1265#define THREAD_WAIT_FLAGS_INITIAL 0x0U
1266
1267/**
1268 * @brief Mask to get the thread wait state flags.
1269 */
1270#define THREAD_WAIT_STATE_MASK 0xffU
1271
1272/**
1273 * @brief Indicates that the thread begins with the blocking operation.
1274 *
1275 * A blocking operation consists of an optional watchdog initialization and the
1276 * setting of the appropriate thread blocking state with the corresponding
1277 * scheduler block operation.
1278 */
1279#define THREAD_WAIT_STATE_INTEND_TO_BLOCK 0x1U
1280
1281/**
1282 * @brief Indicates that the thread completed the blocking operation.
1283 */
1284#define THREAD_WAIT_STATE_BLOCKED 0x2U
1285
1286/**
1287 * @brief Indicates that a condition to end the thread wait occurred.
1288 *
1289 * This could be a timeout, a signal, an event or a resource availability.
1290 */
1291#define THREAD_WAIT_STATE_READY_AGAIN 0x4U
1292
1293/**
1294 * @brief Mask to get the thread wait class flags.
1295 */
1296#define THREAD_WAIT_CLASS_MASK 0xff00U
1297
1298/**
1299 * @brief Indicates that the thread waits for an event.
1300 */
1301#define THREAD_WAIT_CLASS_EVENT 0x100U
1302
1303/**
1304 * @brief Indicates that the thread waits for a system event.
1305 */
1306#define THREAD_WAIT_CLASS_SYSTEM_EVENT 0x200U
1307
1308/**
1309 * @brief Indicates that the thread waits for a object.
1310 */
1311#define THREAD_WAIT_CLASS_OBJECT 0x400U
1312
1313RTEMS_INLINE_ROUTINE void _Thread_Wait_flags_set(
1314  Thread_Control    *the_thread,
1315  Thread_Wait_flags  flags
1316)
1317{
1318#if defined(RTEMS_SMP)
1319  _Atomic_Store_uint( &the_thread->Wait.flags, flags, ATOMIC_ORDER_RELAXED );
1320#else
1321  the_thread->Wait.flags = flags;
1322#endif
1323}
1324
1325RTEMS_INLINE_ROUTINE Thread_Wait_flags _Thread_Wait_flags_get(
1326  const Thread_Control *the_thread
1327)
1328{
1329#if defined(RTEMS_SMP)
1330  return _Atomic_Load_uint( &the_thread->Wait.flags, ATOMIC_ORDER_RELAXED );
1331#else
1332  return the_thread->Wait.flags;
1333#endif
1334}
1335
1336/**
1337 * @brief Tries to change the thread wait flags inside a critical section
1338 * (interrupts disabled).
1339 *
1340 * In case the wait flags are equal to the expected wait flags, then the wait
1341 * flags are set to the desired wait flags.
1342 *
1343 * @param[in] the_thread The thread.
1344 * @param[in] expected_flags The expected wait flags.
1345 * @param[in] desired_flags The desired wait flags.
1346 *
1347 * @retval true The wait flags were equal to the expected wait flags.
1348 * @retval false Otherwise.
1349 */
1350RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change_critical(
1351  Thread_Control    *the_thread,
1352  Thread_Wait_flags  expected_flags,
1353  Thread_Wait_flags  desired_flags
1354)
1355{
1356#if defined(RTEMS_SMP)
1357  return _Atomic_Compare_exchange_uint(
1358    &the_thread->Wait.flags,
1359    &expected_flags,
1360    desired_flags,
1361    ATOMIC_ORDER_RELAXED,
1362    ATOMIC_ORDER_RELAXED
1363  );
1364#else
1365  bool success = the_thread->Wait.flags == expected_flags;
1366
1367  if ( success ) {
1368    the_thread->Wait.flags = desired_flags;
1369  }
1370
1371  return success;
1372#endif
1373}
1374
1375/**
1376 * @brief Tries to change the thread wait flags.
1377 *
1378 * @see _Thread_Wait_flags_try_change_critical().
1379 */
1380RTEMS_INLINE_ROUTINE bool _Thread_Wait_flags_try_change(
1381  Thread_Control    *the_thread,
1382  Thread_Wait_flags  expected_flags,
1383  Thread_Wait_flags  desired_flags
1384)
1385{
1386  bool success;
1387#if !defined(RTEMS_SMP)
1388  ISR_Level level;
1389
1390  _ISR_Disable_without_giant( level );
1391#endif
1392
1393  success = _Thread_Wait_flags_try_change_critical(
1394    the_thread,
1395    expected_flags,
1396    desired_flags
1397  );
1398
1399#if !defined(RTEMS_SMP)
1400  _ISR_Enable_without_giant( level );
1401#endif
1402
1403  return success;
1404}
1405
1406/**
1407 * @brief Sets the thread queue.
1408 *
1409 * The caller must be the owner of the thread lock.
1410 *
1411 * @param[in] the_thread The thread.
1412 * @param[in] new_queue The new queue.
1413 *
1414 * @see _Thread_Lock_set().
1415 */
1416RTEMS_INLINE_ROUTINE void _Thread_Wait_set_queue(
1417  Thread_Control     *the_thread,
1418  Thread_queue_Queue *new_queue
1419)
1420{
1421  the_thread->Wait.queue = new_queue;
1422}
1423
1424/**
1425 * @brief Sets the thread queue operations.
1426 *
1427 * The caller must be the owner of the thread lock.
1428 *
1429 * @param[in] the_thread The thread.
1430 * @param[in] new_operations The new queue operations.
1431 *
1432 * @see _Thread_Lock_set() and _Thread_Wait_restore_default_operations().
1433 */
1434RTEMS_INLINE_ROUTINE void _Thread_Wait_set_operations(
1435  Thread_Control                *the_thread,
1436  const Thread_queue_Operations *new_operations
1437)
1438{
1439  the_thread->Wait.operations = new_operations;
1440}
1441
1442/**
1443 * @brief Restores the default thread queue operations.
1444 *
1445 * The caller must be the owner of the thread lock.
1446 *
1447 * @param[in] the_thread The thread.
1448 *
1449 * @see _Thread_Wait_set_operations().
1450 */
1451RTEMS_INLINE_ROUTINE void _Thread_Wait_restore_default_operations(
1452  Thread_Control *the_thread
1453)
1454{
1455  the_thread->Wait.operations = &_Thread_queue_Operations_default;
1456}
1457
1458/**
1459 * @brief Sets the thread wait timeout code.
1460 *
1461 * @param[in] the_thread The thread.
1462 * @param[in] timeout_code The new thread wait timeout code.
1463 */
1464RTEMS_INLINE_ROUTINE void _Thread_Wait_set_timeout_code(
1465  Thread_Control *the_thread,
1466  uint32_t        timeout_code
1467)
1468{
1469  the_thread->Wait.timeout_code = timeout_code;
1470}
1471
1472/**
1473 * @brief General purpose thread wait timeout.
1474 *
1475 * @param[in] id Unused.
1476 * @param[in] arg The thread.
1477 */
1478void _Thread_Timeout( Objects_Id id, void *arg );
1479
1480RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
1481  Thread_Control  *the_thread,
1482  Per_CPU_Control *cpu
1483)
1484{
1485#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
1486  the_thread->Scheduler.debug_real_cpu = cpu;
1487#else
1488  (void) the_thread;
1489  (void) cpu;
1490#endif
1491}
1492
1493/** @}*/
1494
1495#ifdef __cplusplus
1496}
1497#endif
1498
1499#if defined(RTEMS_MULTIPROCESSING)
1500#include <rtems/score/threadmp.h>
1501#endif
1502
1503#endif
1504/* end of include file */
Note: See TracBrowser for help on using the repository browser.