source: rtems/cpukit/score/include/rtems/score/threadimpl.h @ 509040f0

4.115
Last change on this file since 509040f0 was c5831a3f, checked in by Sebastian Huber <sebastian.huber@…>, on 04/09/14 at 13:07:54

score: Add clustered/partitioned scheduling

Clustered/partitioned scheduling helps to control the worst-case
latencies in the system. The goal is to reduce the amount of shared
state in the system and thus prevent lock contention. Modern
multi-processor systems tend to have several layers of data and
instruction caches. With clustered/partitioned scheduling it is
possible to honour the cache topology of a system and thus avoid
expensive cache synchronization traffic.

We have clustered scheduling in case the set of processors of a system
is partitioned into non-empty pairwise-disjoint subsets. These subsets
are called clusters. Clusters with a cardinality of one are partitions.
Each cluster is owned by exactly one scheduler instance.

  • Property mode set to 100644
File size: 19.7 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines from the Thread Handler
5 *
6 * This file contains the macro implementation of the inlined
7 * routines from the Thread handler.
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-2008.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2014 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifndef _RTEMS_SCORE_THREADIMPL_H
22#define _RTEMS_SCORE_THREADIMPL_H
23
24#include <rtems/score/thread.h>
25#include <rtems/score/chainimpl.h>
26#include <rtems/score/interr.h>
27#include <rtems/score/isr.h>
28#include <rtems/score/objectimpl.h>
29#include <rtems/score/statesimpl.h>
30#include <rtems/score/sysstate.h>
31#include <rtems/score/todimpl.h>
32#include <rtems/config.h>
33
34#ifdef __cplusplus
35extern "C" {
36#endif
37
38/**
39 * @addtogroup ScoreThread
40 */
41/**@{**/
42
43/**
44 *  The following structure contains the information necessary to manage
45 *  a thread which is waiting for a resource.
46 */
47#define THREAD_STATUS_PROXY_BLOCKING 0x1111111
48
49/**
50 *  Self for the GNU Ada Run-Time
51 */
52SCORE_EXTERN void *rtems_ada_self;
53
54/**
55 *  The following defines the information control block used to
56 *  manage this class of objects.
57 */
58SCORE_EXTERN Objects_Information _Thread_Internal_information;
59
60/**
61 *  The following points to the thread whose floating point
62 *  context is currently loaded.
63 */
64#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
65SCORE_EXTERN Thread_Control *_Thread_Allocated_fp;
66#endif
67
68#if !defined(__DYNAMIC_REENT__)
69/**
70 * The C library re-entrant global pointer. Some C library implementations
71 * such as newlib have a single global pointer that changed during a context
72 * switch. The pointer points to that global pointer. The Thread control block
73 * holds a pointer to the task specific data.
74 */
75SCORE_EXTERN struct _reent **_Thread_libc_reent;
76#endif
77
78/**
79 *  @brief Initialize thread handler.
80 *
81 *  This routine performs the initialization necessary for this handler.
82 */
83void _Thread_Handler_initialization(void);
84
85/**
86 *  @brief Create idle thread.
87 *
88 *  This routine creates the idle thread.
89 *
90 *  @warning No thread should be created before this one.
91 */
92void _Thread_Create_idle(void);
93
94/**
95 *  @brief Start thread multitasking.
96 *
97 *  This routine initiates multitasking.  It is invoked only as
98 *  part of initialization and its invocation is the last act of
99 *  the non-multitasking part of the system initialization.
100 */
101void _Thread_Start_multitasking( void ) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
102
103/**
104 *  @brief Allocate the requested stack space for the thread.
105 *
106 *  Allocate the requested stack space for the thread.
107 *  Set the Start.stack field to the address of the stack.
108 *
109 *  @param[in] the_thread is the thread where the stack space is requested
110 *
111 *  @retval actual size allocated after any adjustment
112 *  @retval zero if the allocation failed
113 */
114size_t _Thread_Stack_Allocate(
115  Thread_Control *the_thread,
116  size_t          stack_size
117);
118
119/**
120 *  @brief Deallocate thread stack.
121 *
122 *  Deallocate the Thread's stack.
123 */
124void _Thread_Stack_Free(
125  Thread_Control *the_thread
126);
127
128/**
129 *  @brief Initialize thread.
130 *
131 *  This routine initializes the specified the thread.  It allocates
132 *  all memory associated with this thread.  It completes by adding
133 *  the thread to the local object table so operations on this
134 *  thread id are allowed.
135 *
136 *  @note If stack_area is NULL, it is allocated from the workspace.
137 *
138 *  @note If the stack is allocated from the workspace, then it is
139 *        guaranteed to be of at least minimum size.
140 */
141bool _Thread_Initialize(
142  Objects_Information                  *information,
143  Thread_Control                       *the_thread,
144  const struct Scheduler_Control       *scheduler,
145  void                                 *stack_area,
146  size_t                                stack_size,
147  bool                                  is_fp,
148  Priority_Control                      priority,
149  bool                                  is_preemptible,
150  Thread_CPU_budget_algorithms          budget_algorithm,
151  Thread_CPU_budget_algorithm_callout   budget_callout,
152  uint32_t                              isr_level,
153  Objects_Name                          name
154);
155
156/**
157 *  @brief Initializes thread and executes it.
158 *
159 *  This routine initializes the executable information for a thread
160 *  and makes it ready to execute.  After this routine executes, the
161 *  thread competes with all other threads for CPU time.
162 *
163 *  @param the_thread is the thread to be initialized
164 *  @param the_prototype
165 *  @param entry_point
166 *  @param pointer_argument
167 *  @param numeric_argument
168 *  @param[in,out] cpu The processor if used to start an idle thread
169 *  during system initialization.  Must be set to @c NULL to start a normal
170 *  thread.
171 */
172bool _Thread_Start(
173  Thread_Control            *the_thread,
174  Thread_Start_types         the_prototype,
175  void                      *entry_point,
176  void                      *pointer_argument,
177  Thread_Entry_numeric_type  numeric_argument,
178  Per_CPU_Control           *cpu
179);
180
181bool _Thread_Restart(
182  Thread_Control            *the_thread,
183  Thread_Control            *executing,
184  void                      *pointer_argument,
185  Thread_Entry_numeric_type  numeric_argument
186);
187
188bool _Thread_Set_life_protection( bool protect );
189
190void _Thread_Life_action_handler(
191  Thread_Control  *executing,
192  Thread_Action   *action,
193  Per_CPU_Control *cpu,
194  ISR_Level        level
195);
196
197/**
198 * @brief Kills all zombie threads in the system.
199 *
200 * Threads change into the zombie state as the last step in the thread
201 * termination sequence right before a context switch to the heir thread is
202 * initiated.  Since the thread stack is still in use during this phase we have
203 * to postpone the thread stack reclamation until this point.  On SMP
204 * configurations we may have to busy wait for context switch completion here.
205 */
206void _Thread_Kill_zombies( void );
207
208/**
209 * @brief Closes the thread.
210 *
211 * Closes the thread object and starts the thread termination sequence.  In
212 * case the executing thread is not terminated, then this function waits until
213 * the terminating thread reached the zombie state.
214 */
215void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
216
217/**
218 *  @brief Removes any set states for @a the_thread.
219 *
220 *  This routine removes any set states for @a the_thread.  It performs
221 *  any necessary scheduling operations including the selection of
222 *  a new heir thread.
223 *
224 *  - INTERRUPT LATENCY:
225 *    + ready chain
226 *    + select heir
227 */
228void _Thread_Ready(
229  Thread_Control *the_thread
230);
231
232/**
233 *  @brief Clears the indicated STATES for @a the_thread.
234 *
235 *  This routine clears the indicated STATES for @a the_thread.  It performs
236 *  any necessary scheduling operations including the selection of
237 *  a new heir thread.
238 *
239 *  - INTERRUPT LATENCY:
240 *    + priority map
241 *    + select heir
242 */
243void _Thread_Clear_state(
244  Thread_Control *the_thread,
245  States_Control  state
246);
247
248/**
249 *  @brief Sets the indicated @a state for @a the_thread.
250 *
251 *  This routine sets the indicated @a state for @a the_thread.  It performs
252 *  any necessary scheduling operations including the selection of
253 *  a new heir thread.
254 *
255 *  @param[in] the_thread is the thread to set the state for.
256 *  @param[in] state is the state to set the_thread to.
257 *
258 *  - INTERRUPT LATENCY:
259 *   + ready chain
260 *   + select map
261 */
262void _Thread_Set_state(
263  Thread_Control *the_thread,
264  States_Control  state
265);
266
267/**
268 *  @brief Sets the transient state for a thread.
269 *
270 *  This routine sets the Transient state for @a the_thread.  It performs
271 *  any necessary scheduling operations including the selection of
272 *  a new heir thread.
273 *
274 *  @param[in] the_thread is the thread to perform the action upon.
275 *
276 *  - INTERRUPT LATENCY:
277 *    + single case
278 */
279void _Thread_Set_transient(
280  Thread_Control *the_thread
281);
282
283/**
284 *  @brief Initializes environment for a thread.
285 *
286 *  This routine initializes the context of @a the_thread to its
287 *  appropriate starting state.
288 *
289 *  @param[in] the_thread is the pointer to the thread control block.
290 */
291void _Thread_Load_environment(
292  Thread_Control *the_thread
293);
294
295/**
296 *  @brief Wrapper function for all threads.
297 *
298 *  This routine is the wrapper function for all threads.  It is
299 *  the starting point for all threads.  The user provided thread
300 *  entry point is invoked by this routine.  Operations
301 *  which must be performed immediately before and after the user's
302 *  thread executes are found here.
303 *
304 *  @note On entry, it is assumed all interrupts are blocked and that this
305 *  routine needs to set the initial isr level.  This may or may not
306 *  actually be needed by the context switch routine and as a result
307 *  interrupts may already be at their proper level.  Either way,
308 *  setting the initial isr level properly here is safe.
309 */
310void _Thread_Handler( void );
311
312/**
313 *  @brief Ends the delay of a thread.
314 *
315 *  This routine is invoked when a thread must be unblocked at the
316 *  end of a time based delay (i.e. wake after or wake when).
317 *  It is called by the watchdog handler.
318 *
319 *  @param[in] id is the thread id
320 */
321void _Thread_Delay_ended(
322  Objects_Id  id,
323  void       *ignored
324);
325
326/**
327 *  @brief Change the priority of a thread.
328 *
329 *  This routine changes the current priority of @a the_thread to
330 *  @a new_priority.  It performs any necessary scheduling operations
331 *  including the selection of a new heir thread.
332 *
333 *  @param[in] the_thread is the thread to change
334 *  @param[in] new_priority is the priority to set @a the_thread to
335 *  @param[in] prepend_it is a switch to prepend the thread
336 */
337void _Thread_Change_priority (
338  Thread_Control   *the_thread,
339  Priority_Control  new_priority,
340  bool              prepend_it
341);
342
343/**
344 *  @brief Set thread priority.
345 *
346 *  This routine updates the priority related fields in the_thread
347 *  control block to indicate the current priority is now new_priority.
348 */
349void _Thread_Set_priority(
350  Thread_Control   *the_thread,
351  Priority_Control  new_priority
352);
353
354/**
355 *  This routine updates the related suspend fields in the_thread
356 *  control block to indicate the current nested level.
357 */
358#define _Thread_Suspend( _the_thread ) \
359        _Thread_Set_state( _the_thread, STATES_SUSPENDED )
360
361/**
362 *  This routine updates the related suspend fields in the_thread
363 *  control block to indicate the current nested level.  A force
364 *  parameter of true will force a resume and clear the suspend count.
365 */
366#define _Thread_Resume( _the_thread ) \
367        _Thread_Clear_state( _the_thread, STATES_SUSPENDED )
368
369/**
370 *  @brief Maps thread Id to a TCB pointer.
371 *
372 *  This function maps thread IDs to thread control
373 *  blocks.  If ID corresponds to a local thread, then it
374 *  returns the_thread control pointer which maps to ID
375 *  and @a location is set to OBJECTS_LOCAL.  If the thread ID is
376 *  global and resides on a remote node, then location is set
377 *  to OBJECTS_REMOTE, and the_thread is undefined.
378 *  Otherwise, location is set to OBJECTS_ERROR and
379 *  the_thread is undefined.
380 *
381 *  @param[in] id is the id of the thread.
382 *  @param[in] location is the location of the block.
383 *
384 *  @note  The performance of many RTEMS services depends upon
385 *         the quick execution of the "good object" path in this
386 *         routine.  If there is a possibility of saving a few
387 *         cycles off the execution time, this routine is worth
388 *         further optimization attention.
389 */
390Thread_Control *_Thread_Get (
391  Objects_Id         id,
392  Objects_Locations *location
393);
394
395/**
396 *  @brief Cancel a blocking operation due to ISR.
397 *
398 *  This method is used to cancel a blocking operation that was
399 *  satisfied from an ISR while the thread executing was in the
400 *  process of blocking.
401 *
402 *  This method will restore the previous ISR disable level during the cancel
403 *  operation.  Thus it is an implicit _ISR_Enable().
404 *
405 *  @param[in] sync_state is the synchronization state
406 *  @param[in] the_thread is the thread whose blocking is canceled
407 *  @param[in] level is the previous ISR disable level
408 *
409 *  @note This is a rare routine in RTEMS.  It is called with
410 *        interrupts disabled and only when an ISR completed
411 *        a blocking condition in process.
412 */
413void _Thread_blocking_operation_Cancel(
414  Thread_blocking_operation_States  sync_state,
415  Thread_Control                   *the_thread,
416  ISR_Level                         level
417);
418
/**
 * @brief Returns the processor of the thread.
 *
 * On SMP configurations this is the processor stored in the thread control
 * block.  On uni-processor configurations there is exactly one processor, so
 * the current processor is returned and the thread parameter is unused.
 *
 * @param[in] thread is the thread control block to query.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU(
  const Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  return thread->cpu;
#else
  (void) thread;

  return _Per_CPU_Get();
#endif
}
431
/**
 * @brief Sets the processor of the thread.
 *
 * On SMP configurations the processor is stored in the thread control block.
 * On uni-processor configurations this is a no-op since there is only one
 * processor.
 *
 * @param[in,out] thread is the thread control block to update.
 * @param[in] cpu is the processor to assign to the thread.
 */
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  thread->cpu = cpu;
#else
  (void) thread;
  (void) cpu;
#endif
}
444
445/**
446 * This function returns true if the_thread is the currently executing
447 * thread, and false otherwise.
448 */
449
450RTEMS_INLINE_ROUTINE bool _Thread_Is_executing (
451  const Thread_Control *the_thread
452)
453{
454  return ( the_thread == _Thread_Executing );
455}
456
457/**
458 * This function returns true if the_thread is the heir
459 * thread, and false otherwise.
460 */
461
462RTEMS_INLINE_ROUTINE bool _Thread_Is_heir (
463  const Thread_Control *the_thread
464)
465{
466  return ( the_thread == _Thread_Heir );
467}
468
469/**
470 * This routine clears any blocking state for the_thread.  It performs
471 * any necessary scheduling operations including the selection of
472 * a new heir thread.
473 */
474
475RTEMS_INLINE_ROUTINE void _Thread_Unblock (
476  Thread_Control *the_thread
477)
478{
479  _Thread_Clear_state( the_thread, STATES_BLOCKED );
480}
481
/**
 * @brief Resets the context of the calling thread to its initial state.
 *
 * NOTE(review): _CPU_Context_Restart_self() appears not to return — the ISR
 * level obtained below is deliberately discarded and never restored; confirm
 * against the CPU port documentation.
 *
 * @param[in] executing is the executing thread (must be the calling thread).
 */
RTEMS_INLINE_ROUTINE void _Thread_Restart_self( Thread_Control *executing )
{
#if defined(RTEMS_SMP)
  ISR_Level level;

  /* Drop the Giant lock before acquiring the per-CPU lock with interrupts
     disabled for the context restart. */
  _Giant_Release();

  _Per_CPU_ISR_disable_and_acquire( _Per_CPU_Get(), level );
  ( void ) level;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  /* Reload the thread's floating point context if it owns one. */
  if ( executing->fp_context != NULL )
    _Context_Restore_fp( &executing->fp_context );
#endif

  _CPU_Context_Restart_self( &executing->Registers );
}
505
506/**
507 * This function returns true if the floating point context of
508 * the_thread is currently loaded in the floating point unit, and
509 * false otherwise.
510 */
511
512#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
513RTEMS_INLINE_ROUTINE bool _Thread_Is_allocated_fp (
514  const Thread_Control *the_thread
515)
516{
517  return ( the_thread == _Thread_Allocated_fp );
518}
519#endif
520
521/**
522 * This routine is invoked when the currently loaded floating
523 * point context is now longer associated with an active thread.
524 */
525
526#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
527RTEMS_INLINE_ROUTINE void _Thread_Deallocate_fp( void )
528{
529  _Thread_Allocated_fp = NULL;
530}
531#endif
532
533/**
534 * This function returns true if dispatching is disabled, and false
535 * otherwise.
536 */
537
538RTEMS_INLINE_ROUTINE bool _Thread_Is_context_switch_necessary( void )
539{
540  return ( _Thread_Dispatch_necessary );
541}
542
543/**
544 * This function returns true if the_thread is NULL and false otherwise.
545 */
546
547RTEMS_INLINE_ROUTINE bool _Thread_Is_null (
548  const Thread_Control *the_thread
549)
550{
551  return ( the_thread == NULL );
552}
553
554/**
555 * @brief Is proxy blocking.
556 *
557 * status which indicates that a proxy is blocking, and false otherwise.
558 */
559RTEMS_INLINE_ROUTINE bool _Thread_Is_proxy_blocking (
560  uint32_t   code
561)
562{
563  return (code == THREAD_STATUS_PROXY_BLOCKING);
564}
565
566RTEMS_INLINE_ROUTINE uint32_t _Thread_Get_maximum_internal_threads(void)
567{
568  /* Idle threads */
569  uint32_t maximum_internal_threads =
570    rtems_configuration_get_maximum_processors();
571
572  /* MPCI thread */
573#if defined(RTEMS_MULTIPROCESSING)
574  if ( _System_state_Is_multiprocessing ) {
575    ++maximum_internal_threads;
576  }
577#endif
578
579  return maximum_internal_threads;
580}
581
582RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Internal_allocate( void )
583{
584  return (Thread_Control *)
585    _Objects_Allocate_unprotected( &_Thread_Internal_information );
586}
587
/**
 * @brief Requests a thread dispatch if the thread is currently executing.
 *
 * On SMP configurations, if the thread executes on some processor, the
 * dispatch necessary indicator of that processor is set and, if it is a
 * remote processor, an inter-processor interrupt is sent to it.  On
 * uni-processor configurations this is a no-op.
 *
 * @param[in,out] thread is the thread to request a dispatch for.
 */
RTEMS_INLINE_ROUTINE void _Thread_Request_dispatch_if_executing(
  Thread_Control *thread
)
{
#if defined(RTEMS_SMP)
  if ( thread->is_executing ) {
    const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
    Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );

    cpu_of_thread->dispatch_necessary = true;

    /* A remote processor must be interrupted to notice the request. */
    if ( cpu_of_executing != cpu_of_thread ) {
      _Per_CPU_Send_interrupt( cpu_of_thread );
    }
  }
#else
  (void) thread;
#endif
}
607
/**
 * @brief Signals a notification for the thread.
 *
 * If invoked from an ISR on the processor executing the thread, the local
 * dispatch necessary indicator is set.  Otherwise, on SMP configurations, if
 * the thread executes on a remote processor, that processor's dispatch
 * necessary indicator is set and an inter-processor interrupt is sent.
 *
 * @param[in,out] thread is the thread to notify.
 */
RTEMS_INLINE_ROUTINE void _Thread_Signal_notification( Thread_Control *thread )
{
  if ( _ISR_Is_in_progress() && _Thread_Is_executing( thread ) ) {
    _Thread_Dispatch_necessary = true;
  } else {
#if defined(RTEMS_SMP)
    if ( thread->is_executing ) {
      const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
      Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );

      /* Only a remote processor needs an explicit interrupt. */
      if ( cpu_of_executing != cpu_of_thread ) {
        cpu_of_thread->dispatch_necessary = true;
        _Per_CPU_Send_interrupt( cpu_of_thread );
      }
    }
#endif
  }
}
626
627RTEMS_INLINE_ROUTINE void _Thread_Update_cpu_time_used(
628  Thread_Control *executing,
629  Timestamp_Control *time_of_last_context_switch
630)
631{
632  Timestamp_Control uptime;
633  Timestamp_Control ran;
634
635  _TOD_Get_uptime( &uptime );
636  _Timestamp_Subtract(
637    time_of_last_context_switch,
638    &uptime,
639    &ran
640  );
641  *time_of_last_context_switch = uptime;
642  _Timestamp_Add_to( &executing->cpu_time_used, &ran );
643}
644
645RTEMS_INLINE_ROUTINE void _Thread_Action_control_initialize(
646  Thread_Action_control *action_control
647)
648{
649  _Chain_Initialize_empty( &action_control->Chain );
650}
651
652RTEMS_INLINE_ROUTINE void _Thread_Action_initialize(
653  Thread_Action         *action,
654  Thread_Action_handler  handler
655)
656{
657  action->handler = handler;
658  _Chain_Set_off_chain( &action->Node );
659}
660
661RTEMS_INLINE_ROUTINE Per_CPU_Control *
662  _Thread_Action_ISR_disable_and_acquire_for_executing( ISR_Level *level )
663{
664  Per_CPU_Control *cpu;
665
666  _ISR_Disable_without_giant( *level );
667  cpu = _Per_CPU_Get();
668  _Per_CPU_Acquire( cpu );
669
670  return cpu;
671}
672
673RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Action_ISR_disable_and_acquire(
674  Thread_Control *thread,
675  ISR_Level      *level
676)
677{
678  Per_CPU_Control *cpu;
679
680  _ISR_Disable_without_giant( *level );
681  cpu = _Thread_Get_CPU( thread );
682  _Per_CPU_Acquire( cpu );
683
684  return cpu;
685}
686
687RTEMS_INLINE_ROUTINE void _Thread_Action_release_and_ISR_enable(
688  Per_CPU_Control *cpu,
689  ISR_Level level
690)
691{
692  _Per_CPU_Release_and_ISR_enable( cpu, level );
693}
694
695RTEMS_INLINE_ROUTINE void _Thread_Add_post_switch_action(
696  Thread_Control *thread,
697  Thread_Action  *action
698)
699{
700  Per_CPU_Control *cpu;
701  ISR_Level        level;
702
703  cpu = _Thread_Action_ISR_disable_and_acquire( thread, &level );
704  _Chain_Append_if_is_off_chain_unprotected(
705    &thread->Post_switch_actions.Chain,
706    &action->Node
707  );
708  _Thread_Action_release_and_ISR_enable( cpu, level );
709}
710
711RTEMS_INLINE_ROUTINE bool _Thread_Is_life_restarting(
712  Thread_Life_state life_state
713)
714{
715  return ( life_state & THREAD_LIFE_RESTARTING ) != 0;
716}
717
718RTEMS_INLINE_ROUTINE bool _Thread_Is_life_terminating(
719  Thread_Life_state life_state
720)
721{
722  return ( life_state & THREAD_LIFE_TERMINATING ) != 0;
723}
724
725RTEMS_INLINE_ROUTINE bool _Thread_Is_life_protected(
726  Thread_Life_state life_state
727)
728{
729  return ( life_state & THREAD_LIFE_PROTECTED ) != 0;
730}
731
732RTEMS_INLINE_ROUTINE bool _Thread_Is_life_changing(
733  Thread_Life_state life_state
734)
735{
736  return ( life_state & THREAD_LIFE_RESTARTING_TERMINTING ) != 0;
737}
738
739#if !defined(__DYNAMIC_REENT__)
740/**
741 * This routine returns the C library re-enterant pointer.
742 */
743
744RTEMS_INLINE_ROUTINE struct _reent **_Thread_Get_libc_reent( void )
745{
746  return _Thread_libc_reent;
747}
748
749/**
750 * This routine set the C library re-enterant pointer.
751 */
752
753RTEMS_INLINE_ROUTINE void _Thread_Set_libc_reent (
754  struct _reent **libc_reent
755)
756{
757  _Thread_libc_reent = libc_reent;
758}
759#endif
760
761/** @}*/
762
763#ifdef __cplusplus
764}
765#endif
766
767#if defined(RTEMS_MULTIPROCESSING)
768#include <rtems/score/threadmp.h>
769#endif
770
771#endif
772/* end of include file */
Note: See TracBrowser for help on using the repository browser.