source: rtems/cpukit/include/rtems/score/schedulerimpl.h @ 11e7893

Last change on this file was 11e7893, checked in by Andreas Dachsberger <andreas.dachsberger@…>, on 04/12/19 at 09:55:49

doxygen: score: adjust doc in schedulerimpl.h to doxygen guidelines

Update #3706.

/**
 * @file
 *
 * @ingroup RTEMSScoreScheduler
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2017 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup RTEMSScoreScheduler
 *
 * @{
 */

/**
 * @brief Maps a priority value to support the append indicator.
 */
#define SCHEDULER_PRIORITY_MAP( priority ) ( ( priority ) << 1 )

/**
 * @brief Returns the plain priority value.
 */
#define SCHEDULER_PRIORITY_UNMAP( priority ) ( ( priority ) >> 1 )

/**
 * @brief Clears the priority append indicator bit.
 */
#define SCHEDULER_PRIORITY_PURIFY( priority )  \
  ( ( priority ) & ~( (Priority_Control) SCHEDULER_PRIORITY_APPEND_FLAG ) )

/**
 * @brief Returns the priority control with the append indicator bit set.
 */
#define SCHEDULER_PRIORITY_APPEND( priority )  \
  ( ( priority ) | SCHEDULER_PRIORITY_APPEND_FLAG )

/**
 * @brief Returns true if the item should be appended to its priority group,
 * otherwise returns false, i.e. the item should be prepended to its priority
 * group.
 */
#define SCHEDULER_PRIORITY_IS_APPEND( priority ) \
  ( ( ( priority ) & SCHEDULER_PRIORITY_APPEND_FLAG ) != 0 )

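/*
 * A minimal usage sketch, added for illustration and not part of the
 * original file.  It assumes that SCHEDULER_PRIORITY_APPEND_FLAG occupies
 * the least significant bit, as the shift in SCHEDULER_PRIORITY_MAP()
 * suggests:
 *
 *   Priority_Control mapped;
 *
 *   mapped = SCHEDULER_PRIORITY_MAP( 5 );
 *   mapped = SCHEDULER_PRIORITY_APPEND( mapped );
 *   _Assert( SCHEDULER_PRIORITY_IS_APPEND( mapped ) );
 *   _Assert( SCHEDULER_PRIORITY_UNMAP( mapped ) == 5 );
 *   _Assert( SCHEDULER_PRIORITY_PURIFY( mapped ) == SCHEDULER_PRIORITY_MAP( 5 ) );
 */
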
/**
 * @brief Initializes the scheduler to the policy chosen by the user.
 *
 * This routine initializes the scheduler to the policy chosen by the user
 * through confdefs, or to the priority scheduler with ready chains by
 * default.
 */
void _Scheduler_Handler_initialization( void );

/**
 * @brief Gets the context of the scheduler.
 *
 * @param scheduler The scheduler to get the context of.
 *
 * @return The context of @a scheduler.
 */
RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

/**
 * @brief Gets the scheduler for the CPU.
 *
 * @param cpu The CPU control to get the scheduler of.
 *
 * @return The scheduler for the CPU.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param scheduler The scheduler instance.
 * @param lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param scheduler The scheduler instance.
 * @param lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

#if defined(RTEMS_SMP)
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );

/**
 * @brief Registers an ask for help request if necessary.
 *
 * The actual ask for help operation is carried out during
 * _Thread_Do_dispatch() on a processor related to the thread.  This yields a
 * better separation of scheduler instances.  A thread of one scheduler
 * instance should not be forced to carry out too much work for threads on
 * other scheduler instances.
 *
 * @param the_thread The thread in need of help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
{
  _Assert( _Thread_State_is_owner( the_thread ) );

  if ( the_thread->Scheduler.helping_nodes > 0 ) {
    _Scheduler_Request_ask_for_help( the_thread );
  }
}
#endif

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

#if defined(RTEMS_SMP)
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
    _Chain_First( &the_thread->Scheduler.Scheduler_nodes )
  );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
#else
  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  scheduler = _Thread_Scheduler_get_home( the_thread );
#endif

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.unblock )( scheduler, the_thread, scheduler_node );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uniprocessor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param scheduler The scheduler instance.
 * @param priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param scheduler The scheduler instance.
 * @param priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
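
/*
 * An illustrative check, added for this presentation and not part of the
 * original file: for a priority p valid in the scheduler domain interval
 * [0, M], the unmap operation inverts the map operation.
 *
 *   Priority_Control p = 42;
 *
 *   _Assert(
 *     _Scheduler_Unmap_priority(
 *       scheduler,
 *       _Scheduler_Map_priority( scheduler, p )
 *     ) == p
 *   );
 */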

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param scheduler The scheduler instance.
 * @param[out] node The scheduler node to initialize.
 * @param the_thread The thread of the scheduler node to initialize.
 * @param priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param scheduler The scheduler instance.
 * @param[out] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param the_thread The thread.
 * @param priority_node The priority node of the job.
 * @param deadline The deadline in watchdog ticks since boot.
 * @param queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param the_thread The thread.
 * @param priority_node The priority node of the job.
 * @param queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers
 * which support standard RTEMS features, this includes
 * time-slicing management.
 *
 * @param cpu The CPU control for the operation.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}
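
/*
 * A hedged sketch of a call site, added for illustration: a clock tick
 * handler would invoke this operation for the current processor.  The
 * exact call site in RTEMS may differ.
 *
 *   Per_CPU_Control *cpu = _Per_CPU_Get();
 *
 *   _Scheduler_Tick( cpu );
 */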

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

/**
 * @brief Checks if the scheduler of the CPU with the given index is equal
 *   to the given scheduler.
 *
 * @param scheduler The scheduler for the comparison.
 * @param cpu_index The index of the CPU for the comparison.
 *
 * @retval true The scheduler of the CPU is the given @a scheduler.
 * @retval false The scheduler of the CPU is not the given @a scheduler.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t                 cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control   *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

/**
 * @brief Gets the processors of the scheduler.
 *
 * @param scheduler The scheduler to get the processors of.
 *
 * @return The processors of the context of the given scheduler.
 */
RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return &_Scheduler_Get_context( scheduler )->Processors;
#else
  return &_Processor_mask_The_one_and_only;
#endif
}
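
/*
 * A small illustrative use, not part of the original file: counting the
 * processors owned by a scheduler instance via its processor mask.
 *
 *   uint32_t processor_count =
 *     _Processor_mask_Count( _Scheduler_Get_processors( scheduler ) );
 */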

/**
 * @brief Copies the thread's scheduler's affinity to the given cpuset.
 *
 * @param the_thread The thread to get the affinity of its scheduler.
 * @param cpusetsize The size of @a cpuset.
 * @param[out] cpuset The cpuset that serves as destination for the copy
 *   operation.
 *
 * @retval true The copy operation was lossless.
 * @retval false The copy operation was not lossless.
 */
bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

/**
 * @brief Checks if the affinity is a subset of the online processors.
 *
 * @param scheduler This parameter is unused.
 * @param the_thread This parameter is unused.
 * @param node This parameter is unused.
 * @param affinity The processor mask to check.
 *
 * @retval true @a affinity is a subset of the online processors.
 * @retval false @a affinity is not a subset of the online processors.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  (void) scheduler;
  (void) the_thread;
  (void) node;
  return _Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() );
}

/**
 * @brief Sets the thread's scheduler's affinity.
 *
 * @param[in, out] the_thread The thread to set the affinity of.
 * @param cpusetsize The size of @a cpuset.
 * @param cpuset The cpuset to set the affinity.
 *
 * @retval true The operation succeeded.
 * @retval false The operation did not succeed.
 */
bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

/**
 * @brief Blocks the thread.
 *
 * @param scheduler The scheduler instance.
 * @param the_thread The thread to block.
 * @param node The corresponding scheduler node.
 * @param extract Method to extract the thread.
 * @param schedule Method for scheduling threads.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
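
/*
 * A hedged sketch of a concrete block operation built on top of
 * _Scheduler_Generic_block().  The _My_scheduler_* callbacks are
 * placeholders for scheduler-specific ready queue extract and schedule
 * bodies, not functions of this file.
 *
 *   void _My_scheduler_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *the_thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     _Scheduler_Generic_block(
 *       scheduler,
 *       the_thread,
 *       node,
 *       _My_scheduler_Extract_body,
 *       _My_scheduler_Schedule_body
 *     );
 *   }
 */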

/**
 * @brief Gets the number of processors of the scheduler.
 *
 * @param scheduler The scheduler instance to get the number of processors of.
 *
 * @return The number of processors.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Processor_mask_Count( &context->Processors );
#else
  (void) scheduler;

  return 1;
#endif
}

/**
 * @brief Builds an object build id.
 *
 * @param scheduler_index The index to build the build id out of.
 *
 * @return The build id.
 */
RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

/**
 * @brief Gets the scheduler index from the given object build id.
 *
 * @param id The object build id.
 *
 * @return The scheduler index.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}
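
/*
 * An illustrative round trip, not part of the original file: building the
 * object identifier for a scheduler index and mapping it back yields the
 * same index, since _Scheduler_Get_index_by_id() subtracts the identifier
 * built for index zero.
 *
 *   uint32_t   index = 1;
 *   Objects_Id id = _Scheduler_Build_id( index );
 *
 *   _Assert( _Scheduler_Get_index_by_id( id ) == index );
 */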

/**
 * @brief Gets the scheduler from the given object build id.
 *
 * @param id The object build id.
 *
 * @return The scheduler associated with the object id.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
  Objects_Id id
)
{
  uint32_t index;

  index = _Scheduler_Get_index_by_id( id );

  if ( index >= _Scheduler_Count ) {
    return NULL;
  }

  return &_Scheduler_Table[ index ];
}

/**
 * @brief Gets the index of the scheduler.
 *
 * @param scheduler The scheduler to get the index of.
 *
 * @return The index of the given scheduler.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param context The scheduler instance context.
 *
 * @return idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param context The scheduler instance context.
 * @param idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

/**
 * @brief Changes the thread's state to the given new state.
 *
 * @param[out] the_thread The thread to change the state of.
 * @param new_state The new state for @a the_thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

/**
 * @brief Sets the scheduler node's idle thread.
 *
 * @param[in, out] node The node to receive an idle thread.
 * @param idle The idle thread control for the operation.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Uses an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero may
 * use an idle thread in the home scheduler instance in case it currently
 * executes in another scheduler instance or is in a blocking state.
 *
 * @param context The scheduler instance context.
 * @param[in, out] node The node which wants to use the idle thread.
 * @param cpu The processor for the idle thread.
 * @param get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Tries to schedule this scheduler node.
 *
 * @param context The scheduler instance context.
 * @param[in, out] node The node which wants to get scheduled.
 * @param idle A potential idle thread used by a potential victim node.
 * @param get_idle_thread Function to get an idle thread.
 *
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE This node can be scheduled.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE This node can be
 *   scheduled if the provided idle thread is exchanged to it.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK This node cannot be scheduled
 *   and shall be blocked.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}

/**
 * @brief Releases an idle thread using this scheduler node.
 *
 * @param context The scheduler instance context.
 * @param[in, out] node The node which may have an idle thread as user.
 * @param release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

/**
 * @brief Exchanges an idle thread from the scheduler node that currently uses
 *   it to another scheduler node.
 *
 * @param needs_idle The scheduler node that needs an idle thread.
 * @param uses_idle The scheduler node that used the idle thread.
 * @param idle The idle thread that is exchanged.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Blocks this scheduler node.
 *
 * @param context The scheduler instance context.
 * @param[in, out] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in, out] node The node which wants to get blocked.
 * @param is_scheduled This node is scheduled.
 * @param get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}

/**
 * @brief Discards the idle thread from the scheduler node.
 *
 * @param context The scheduler context.
 * @param[in, out] the_thread The thread for the operation.
 * @param[in, out] node The scheduler node to discard the idle thread from.
 * @param release_idle_thread Method to release the idle thread from the context.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblocks this scheduler node.
 *
 * @param context The scheduler instance context.
 * @param[in, out] the_thread The thread which wants to get unblocked.
 * @param[in, out] node The node which wants to get unblocked.
 * @param is_scheduled This node is scheduled.
 * @param release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Do not continue with the unblocking operation.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

/**
 * @brief Updates the heir.
 *
 * @param[in, out] new_heir The new heir.
 * @param force_dispatch Indicates whether the dispatch should also happen if
 *   the currently executing thread is not preemptible.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}
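
/*
 * A hedged sketch of a caller, added for illustration: a uniprocessor
 * scheduler operation might select the highest priority ready thread and
 * install it as heir.  The helper used to obtain that thread is a
 * placeholder, not a function of this file.
 *
 *   Thread_Control *highest_ready = _My_scheduler_Get_highest_ready();
 *
 *   _Scheduler_Update_heir( highest_ready, force_dispatch );
 */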

/**
 * @brief Sets a new scheduler.
 *
 * @param new_scheduler The new scheduler to set.
 * @param[in, out] the_thread The thread for the operations.
 * @param priority The initial priority for the thread with the new scheduler.
 *
 * @retval STATUS_SUCCESSFUL The operation succeeded.
 * @retval STATUS_RESOURCE_IN_USE The thread's wait queue is not empty.
 * @retval STATUS_UNSATISFIED The new scheduler has no processors.
 */
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;

#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if (
    !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
#if defined(RTEMS_SMP)
      || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
      || the_thread->Scheduler.pin_level != 0
#endif
  ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  old_scheduler = _Thread_Scheduler_get_home( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  if (
    _Scheduler_Get_processor_count( new_scheduler ) == 0
      || !( *new_scheduler->Operations.set_affinity )(
        new_scheduler,
        the_thread,
        new_scheduler_node,
        &the_thread->Scheduler.Affinity
      )
  ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
  the_thread->Scheduler.home_scheduler = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */