source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 3a27248

Last change on this file was 3a27248, checked in by Sebastian Huber <sebastian.huber@…>, on 10/21/16 at 07:23:58

score: First part of new MrsP implementation

Update #2556.

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2016 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
  uint32_t cpu_index = _Per_CPU_Get_index( cpu );

  return _Scheduler_Get_by_CPU_index( cpu_index );
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

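/*
 * A minimal usage sketch (hypothetical helper, for illustration only): the
 * same lock context must be passed to _Scheduler_Acquire_critical() and to
 * the matching _Scheduler_Release_critical(), and interrupts must already be
 * disabled.  This mirrors the pattern used by _Scheduler_Schedule() below.
 *
 *   static void _Example_Operate_on_scheduler( Thread_Control *the_thread )
 *   {
 *     const Scheduler_Control *scheduler;
 *     ISR_lock_Context         lock_context;
 *
 *     scheduler = _Scheduler_Get( the_thread );
 *     _Scheduler_Acquire_critical( scheduler, &lock_context );
 *     ( *scheduler->Operations.schedule )( scheduler, the_thread );
 *     _Scheduler_Release_critical( scheduler, &lock_context );
 *   }
 */
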
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.node;
#else
  return the_thread->Scheduler.nodes;
#endif
}

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely only on the ready queue can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler.  It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

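/*
 * A sketch of a typical caller (hypothetical, for illustration only): since
 * _Scheduler_Schedule() only updates the heir and does not dispatch, the
 * caller still has to perform the dispatch afterwards.
 *
 *   static void _Example_Schedule_and_dispatch( Thread_Control *the_thread )
 *   {
 *     _Scheduler_Schedule( the_thread );
 *     _Thread_Dispatch();
 *   }
 */
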
#if defined(RTEMS_SMP)
typedef struct {
  Thread_Control *needs_help;
  Thread_Control *next_needs_help;
} Scheduler_Ask_for_help_context;

RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
  Resource_Node *resource_node,
  void          *arg
)
{
  bool done;
  Scheduler_Ask_for_help_context *help_context = arg;
  Thread_Control *previous_needs_help = help_context->needs_help;
  Thread_Control *next_needs_help;
  Thread_Control *offers_help =
    THREAD_RESOURCE_NODE_TO_THREAD( resource_node );
  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );

  next_needs_help = ( *scheduler->Operations.ask_for_help_X )(
    scheduler,
    offers_help,
    previous_needs_help
  );

  done = next_needs_help != previous_needs_help;

  if ( done ) {
    help_context->next_needs_help = next_needs_help;
  }

  return done;
}

/**
 * @brief Asks threads depending on resources owned by the thread for help.
 *
 * A thread is in need of help if it lost its assigned processor due to
 * preemption by a higher priority thread, or if it was not possible to assign
 * it a processor since its priority is too low on its current scheduler
 * instance.
 *
 * The run-time of this function depends on the size of the resource tree of
 * the thread needing help and other resource trees in case threads in need of
 * help are produced during this operation.
 *
 * @param[in] needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_X(
  Thread_Control *needs_help
)
{
  do {
    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );

    needs_help = ( *scheduler->Operations.ask_for_help_X )(
      scheduler,
      needs_help,
      needs_help
    );

    if ( needs_help != NULL ) {
      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };

      _Resource_Iterate(
        &needs_help->Resource_node,
        _Scheduler_Ask_for_help_visitor,
        &help_context
      );

      needs_help = help_context.next_needs_help;
    }
  } while ( needs_help != NULL );
}

RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  Thread_Control *needs_help
)
{
  if (
    needs_help != NULL
      && _Resource_Node_owns_resources( &needs_help->Resource_node )
  ) {
    Scheduler_Node *node = _Thread_Scheduler_get_own_node( needs_help );

    if (
      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
        || _Scheduler_Node_get_user( node ) != needs_help
    ) {
      _Scheduler_Ask_for_help_X( needs_help );
    }
  }
}
#endif

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  Thread_Control          *needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Ask_for_help_if_necessary( needs_help );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( needs_help != the_thread ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler.  The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  Thread_Control          *needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Ask_for_help_if_necessary( needs_help );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( needs_help != the_thread ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;
    Thread_Control          *needs_help;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    needs_help = ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Ask_for_help_if_necessary( needs_help );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

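/*
 * A sketch of how a priority change reaches the scheduler (hypothetical
 * helper; in the actual code the update is driven via the thread queue
 * context and _Thread_Priority_update()): the new priority is stored in the
 * scheduler node first, then the change is propagated.
 *
 *   static void _Example_Change_priority(
 *     Thread_Control   *the_thread,
 *     Priority_Control  new_priority,
 *     bool              prepend_it
 *   )
 *   {
 *     _Scheduler_Thread_set_priority( the_thread, new_priority, prepend_it );
 *     _Scheduler_Update_priority( the_thread );
 *   }
 */
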
#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}

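/*
 * The bijectivity requirement stated above can be checked directly.  A
 * hypothetical sanity check (illustration only):
 *
 *   static void _Example_Check_priority_mapping(
 *     const Scheduler_Control *scheduler,
 *     Priority_Control         priority
 *   )
 *   {
 *     Priority_Control mapped;
 *
 *     mapped = _Scheduler_Map_priority( scheduler, priority );
 *     _Assert( _Scheduler_Unmap_priority( scheduler, mapped ) == priority );
 *   }
 */
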
/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * corresponding _Scheduler_Node_initialize() and before the memory of the
 * scheduler node is reused.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}

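/*
 * Lifecycle sketch (hypothetical, for illustration only): every
 * _Scheduler_Node_initialize() must eventually be paired with a
 * _Scheduler_Node_destroy() before the node memory is reused.
 *
 *   static void _Example_Node_lifecycle(
 *     const Scheduler_Control *scheduler,
 *     Scheduler_Node          *node,
 *     Thread_Control          *the_thread,
 *     Priority_Control         priority
 *   )
 *   {
 *     _Scheduler_Node_initialize( scheduler, node, the_thread, priority );
 *     _Scheduler_Node_set_priority( node, priority, false );
 *     _Scheduler_Node_destroy( scheduler, node );
 *   }
 */
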
/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

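/*
 * A sketch of the job release pattern (hypothetical caller; the real users
 * are the EDF-style code paths): the queue context collects the threads
 * whose priorities changed, which are then updated in one step.
 *
 *   static void _Example_Release_job(
 *     Thread_Control *the_thread,
 *     Priority_Node  *priority_node,
 *     uint64_t        deadline
 *   )
 *   {
 *     Thread_queue_Context queue_context;
 *
 *     _Scheduler_Release_job(
 *       the_thread,
 *       priority_node,
 *       deadline,
 *       &queue_context
 *     );
 *     _Thread_Priority_update( &queue_context );
 *   }
 */
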
/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers which
 * support standard RTEMS features, this includes time-slicing management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)

RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}

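/*
 * A hypothetical query (illustration only) of the processors owned by a
 * scheduler instance, using the <sys/cpuset.h> macros as above:
 *
 *   static bool _Example_Owns_processor(
 *     const Scheduler_Control *scheduler,
 *     uint32_t                 cpu_index
 *   )
 *   {
 *     cpu_set_t cpuset;
 *
 *     _Scheduler_Get_processor_set( scheduler, sizeof( cpuset ), &cpuset );
 *
 *     return CPU_ISSET_S( (int) cpu_index, sizeof( cpuset ), &cpuset ) != 0;
 *   }
 */
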
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Scheduler_Control *scheduler_of_cpu =
      _Scheduler_Get_by_CPU_index( cpu_index );

    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
          && scheduler != scheduler_of_cpu ) );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}

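/*
 * Scheduler identifiers and table indices round-trip (a sketch, for
 * illustration only, assuming the index is in range):
 *
 *   static void _Example_Id_round_trip( uint32_t scheduler_index )
 *   {
 *     Objects_Id               id;
 *     const Scheduler_Control *scheduler;
 *
 *     id = _Scheduler_Build_id( scheduler_index );
 *     _Assert( _Scheduler_Get_index_by_id( id ) == scheduler_index );
 *
 *     if ( _Scheduler_Get_by_id( id, &scheduler ) ) {
 *       _Assert( _Scheduler_Get_processor_count( scheduler ) > 0 );
 *     }
 *   }
 */
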
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
{
  const Scheduler_Control *scheduler;
  bool ok = _Scheduler_Get_by_id( id, &scheduler );

  (void) scheduler;

  return ok;
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  the_thread->Scheduler.node = node;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}

extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

/**
 * @brief Changes the scheduler help state of a thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_help_state The new help state.
 *
 * @return The previous help state.
 */
RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
  Thread_Control       *the_thread,
  Scheduler_Help_state  new_help_state
)
{
  Scheduler_Node *node = _Thread_Scheduler_get_own_node( the_thread );
  Scheduler_Help_state previous_help_state = node->help_state;

  node->help_state = new_help_state;

  return previous_help_state;
}

/**
 * @brief Changes the resource tree root of a thread.
 *
 * For each node of the resource sub-tree specified by the top thread the
 * scheduler asks for help.  So the root thread gains access to all scheduler
 * nodes corresponding to the resource sub-tree.  In case a thread previously
 * granted help is displaced by this operation, then the scheduler asks for
 * help using its remaining resource tree.
 *
 * The run-time of this function depends on the size of the resource sub-tree
 * and other resource trees in case threads in need of help are produced
 * during this operation.
 *
 * @param[in] top The thread specifying the resource sub-tree top.
 * @param[in] root The thread specifying the new resource sub-tree root.
 */
void _Scheduler_Thread_change_resource_root(
  Thread_Control *top,
  Thread_Control *root
);

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Uses an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
 * helping state may use an idle thread for the scheduler node owned by itself
 * in case it currently executes using another scheduler node or is in a
 * blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );

  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Tries to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The action to take: schedule the node, perform an idle thread
 *   exchange, or block.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;
  Thread_Control                   *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  user = _Scheduler_Node_get_user( node );

  _Thread_Scheduler_acquire_critical( user, &lock_context );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }

    _Thread_Scheduler_release_critical( user, &lock_context );
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      if ( idle != NULL ) {
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
  }

  _Thread_Scheduler_release_critical( user, &lock_context );
  return action;
}

/**
 * @brief Releases an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Blocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context  lock_context;
  Thread_Control   *old_user;
  Thread_Control   *new_user;
  Per_CPU_Control  *thread_cpu;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    _Assert( thread == _Scheduler_Node_get_user( node ) );

    return thread_cpu;
  }

  new_user = NULL;

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( is_scheduled ) {
      _Assert( thread == _Scheduler_Node_get_user( node ) );
      old_user = thread;
      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( is_scheduled ) {
      old_user = _Scheduler_Node_get_user( node );

      if ( thread == old_user ) {
        Thread_Control *owner = _Scheduler_Node_get_owner( node );

        if (
          thread != owner
            && owner->Scheduler.state == THREAD_SCHEDULER_READY
        ) {
          new_user = owner;
          _Scheduler_Node_set_user( node, new_user );
        } else {
          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
        }
      }
    }
  } else {
    /* Not implemented, this is part of the OMIP support path. */
    _Assert( 0 );
  }

  if ( new_user != NULL ) {
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
  }

  return NULL;
}

/**
 * @brief Unblocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  if ( is_scheduled ) {
    Thread_Control *old_user = _Scheduler_Node_get_user( node );
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );
    Thread_Control *owner = _Scheduler_Node_get_owner( node );
    Thread_Control *new_user;

    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
      _Assert( idle != NULL );
      new_user = the_thread;
    } else if ( idle != NULL ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      new_user = the_thread;
    } else if ( the_thread != owner ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
      new_user = the_thread;
      _Scheduler_Node_set_user( node, new_user );
    } else {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
      new_user = NULL;
    }

    if ( new_user != NULL ) {
      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Set_CPU( new_user, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
    }

    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );

    unblock = true;
  }

  return unblock;
}

/**
 * @brief Asks a ready scheduler node for help.
 *
 * @param[in] node The ready node offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
  Scheduler_Node *node,
  Thread_Control *needs_help
)
{
  _Scheduler_Node_set_user( node, needs_help );

  return needs_help;
}

/**
 * @brief Asks a scheduled scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 * @param[in] previous_accepts_help The previous thread accepting help by this
 *   scheduler node.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval needs_help The previous thread accepting help by this scheduler node
 *   which was displaced by the thread needing help.
 * @retval NULL There are no more threads needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Thread_Control                *offers_help,
  Thread_Control                *needs_help,
  Thread_Control                *previous_accepts_help,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *next_needs_help = NULL;
  Thread_Control *old_user = NULL;
  Thread_Control *new_user = NULL;

  if (
    previous_accepts_help != needs_help
      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
  ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
      old_user = previous_accepts_help;
    }

    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
      new_user = needs_help;
    } else {
      _Assert(
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );

      new_user = offers_help;
    }

    if ( previous_accepts_help != offers_help ) {
      next_needs_help = previous_accepts_help;
    }
  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      old_user = _Scheduler_Node_get_user( node );
    }

    new_user = needs_help;
  } else {
    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
  }

  if ( new_user != old_user ) {
    Per_CPU_Control *cpu_self = _Per_CPU_Get();
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_scheduler_and_node(
      old_user,
      _Thread_Scheduler_get_own_node( old_user ),
      old_user
    );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );

    _Scheduler_Node_set_user( node, new_user );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
  }

  return next_needs_help;
}

/**
 * @brief Asks a blocked scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The blocked node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval true Enqueue this scheduler node.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *offers_help,
  Thread_Control    *needs_help
)
{
  bool enqueue;

  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Scheduler_Node_set_user( node, needs_help );
    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );

    enqueue = true;
  } else {
    enqueue = false;
  }

  return enqueue;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

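/*
 * A sketch of making a new heir visible to the dispatcher (hypothetical
 * caller, for illustration only): with force_dispatch == false the current
 * heir is only replaced if it is preemptible, and the actual context switch
 * happens during the next thread dispatch.
 *
 *   static void _Example_Set_new_heir( Thread_Control *new_heir )
 *   {
 *     _Scheduler_Update_heir( new_heir, false );
 *
 *     if ( _Thread_Dispatch_necessary ) {
 *       _Thread_Dispatch();
 *     }
 *   }
 */
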
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;

  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  _Thread_Scheduler_process_requests( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Wait_nodes,
    &new_scheduler_node->Thread.Wait_node
  );
  _Chain_Extract_unprotected(
    &old_scheduler_node->Thread.Scheduler_node.Chain
  );
  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Scheduler_nodes,
    &new_scheduler_node->Thread.Scheduler_node.Chain
  );

  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      the_thread->Scheduler.own_node = new_scheduler_node;
      the_thread->Scheduler.node = new_scheduler_node;
      _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */