source: rtems/cpukit/include/rtems/score/schedulersmpimpl.h @ ee3351c

5
Last change on this file since ee3351c was 6a1734a3, checked in by Andreas Dachsberger <andreas.dachsberger@…>, on 04/15/19 at 08:17:38

doxygen: score: adjust doc in schedulersmpimpl.h to doxygen guidelines

Update #3706.

  • Property mode set to 100644
File size: 54.1 KB
Line 
1/**
2 * @file
3 *
4 * @brief SMP Scheduler Implementation
5 *
6 * @ingroup RTEMSScoreSchedulerSMP
7 */
8
9/*
10 * Copyright (c) 2013, 2017 embedded brains GmbH.  All rights reserved.
11 *
12 *  embedded brains GmbH
13 *  Dornierstr. 4
14 *  82178 Puchheim
15 *  Germany
16 *  <rtems@embedded-brains.de>
17 *
18 * The license and distribution terms for this file may be
19 * found in the file LICENSE in this distribution or at
20 * http://www.rtems.org/license/LICENSE.
21 */
22
23#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25
26#include <rtems/score/schedulersmp.h>
27#include <rtems/score/assert.h>
28#include <rtems/score/chainimpl.h>
29#include <rtems/score/schedulersimpleimpl.h>
30#include <rtems/bspIo.h>
31
32#ifdef __cplusplus
33extern "C" {
34#endif /* __cplusplus */
35
36/**
37 * @addtogroup RTEMSScoreSchedulerSMP
38 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
43 *
44 * State transitions are triggered via basic operations
45 * - _Scheduler_SMP_Enqueue(),
46 * - _Scheduler_SMP_Enqueue_scheduled(), and
47 * - _Scheduler_SMP_Block().
48 *
49 * @dot
50 * digraph {
51 *   node [style="filled"];
52 *
53 *   bs [label="BLOCKED"];
54 *   ss [label="SCHEDULED", fillcolor="green"];
55 *   rs [label="READY", fillcolor="red"];
56 *
57 *   edge [label="enqueue"];
58 *   edge [fontcolor="darkgreen", color="darkgreen"];
59 *
60 *   bs -> ss;
61 *
62 *   edge [fontcolor="red", color="red"];
63 *
64 *   bs -> rs;
65 *
66 *   edge [label="enqueue other"];
67 *
68 *   ss -> rs;
69 *
70 *   edge [label="block"];
71 *   edge [fontcolor="black", color="black"];
72 *
73 *   ss -> bs;
74 *   rs -> bs;
75 *
76 *   edge [label="block other"];
77 *   edge [fontcolor="darkgreen", color="darkgreen"];
78 *
79 *   rs -> ss;
80 * }
81 * @enddot
82 *
83 * During system initialization each processor of the scheduler instance starts
84 * with an idle thread assigned to it.  Lets have a look at an example with two
85 * idle threads I and J with priority 5.  We also have blocked threads A, B and
86 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
87 * with respect to the thread priority from left to right in the below
88 * diagrams.  The highest priority node (lowest priority number) is the
89 * leftmost node.  Since the processor assignment is independent of the thread
90 * priority the processor indices may move from one state to the other.
91 *
92 * @dot
93 * digraph {
94 *   node [style="filled"];
95 *   edge [dir="none"];
96 *   subgraph {
97 *     rank = same;
98 *
99 *     i [label="I (5)", fillcolor="green"];
100 *     j [label="J (5)", fillcolor="green"];
101 *     a [label="A (1)"];
102 *     b [label="B (2)"];
103 *     c [label="C (3)"];
104 *     i -> j;
105 *   }
106 *
107 *   subgraph {
108 *     rank = same;
109 *
110 *     p0 [label="PROCESSOR 0", shape="box"];
111 *     p1 [label="PROCESSOR 1", shape="box"];
112 *   }
113 *
114 *   i -> p0;
115 *   j -> p1;
116 * }
117 * @enddot
118 *
119 * Lets start A.  For this an enqueue operation is performed.
120 *
121 * @dot
122 * digraph {
123 *   node [style="filled"];
124 *   edge [dir="none"];
125 *
126 *   subgraph {
127 *     rank = same;
128 *
129 *     i [label="I (5)", fillcolor="green"];
130 *     j [label="J (5)", fillcolor="red"];
131 *     a [label="A (1)", fillcolor="green"];
132 *     b [label="B (2)"];
133 *     c [label="C (3)"];
134 *     a -> i;
135 *   }
136 *
137 *   subgraph {
138 *     rank = same;
139 *
140 *     p0 [label="PROCESSOR 0", shape="box"];
141 *     p1 [label="PROCESSOR 1", shape="box"];
142 *   }
143 *
144 *   i -> p0;
145 *   a -> p1;
146 * }
147 * @enddot
148 *
149 * Lets start C.
150 *
151 * @dot
152 * digraph {
153 *   node [style="filled"];
154 *   edge [dir="none"];
155 *
156 *   subgraph {
157 *     rank = same;
158 *
159 *     a [label="A (1)", fillcolor="green"];
160 *     c [label="C (3)", fillcolor="green"];
161 *     i [label="I (5)", fillcolor="red"];
162 *     j [label="J (5)", fillcolor="red"];
163 *     b [label="B (2)"];
164 *     a -> c;
165 *     i -> j;
166 *   }
167 *
168 *   subgraph {
169 *     rank = same;
170 *
171 *     p0 [label="PROCESSOR 0", shape="box"];
172 *     p1 [label="PROCESSOR 1", shape="box"];
173 *   }
174 *
175 *   c -> p0;
176 *   a -> p1;
177 * }
178 * @enddot
179 *
180 * Lets start B.
181 *
182 * @dot
183 * digraph {
184 *   node [style="filled"];
185 *   edge [dir="none"];
186 *
187 *   subgraph {
188 *     rank = same;
189 *
190 *     a [label="A (1)", fillcolor="green"];
191 *     b [label="B (2)", fillcolor="green"];
192 *     c [label="C (3)", fillcolor="red"];
193 *     i [label="I (5)", fillcolor="red"];
194 *     j [label="J (5)", fillcolor="red"];
195 *     a -> b;
196 *     c -> i -> j;
197 *   }
198 *
199 *   subgraph {
200 *     rank = same;
201 *
202 *     p0 [label="PROCESSOR 0", shape="box"];
203 *     p1 [label="PROCESSOR 1", shape="box"];
204 *   }
205 *
206 *   b -> p0;
207 *   a -> p1;
208 * }
209 * @enddot
210 *
211 * Lets change the priority of thread A to 4.
212 *
213 * @dot
214 * digraph {
215 *   node [style="filled"];
216 *   edge [dir="none"];
217 *
218 *   subgraph {
219 *     rank = same;
220 *
221 *     b [label="B (2)", fillcolor="green"];
222 *     c [label="C (3)", fillcolor="green"];
223 *     a [label="A (4)", fillcolor="red"];
224 *     i [label="I (5)", fillcolor="red"];
225 *     j [label="J (5)", fillcolor="red"];
226 *     b -> c;
227 *     a -> i -> j;
228 *   }
229 *
230 *   subgraph {
231 *     rank = same;
232 *
233 *     p0 [label="PROCESSOR 0", shape="box"];
234 *     p1 [label="PROCESSOR 1", shape="box"];
235 *   }
236 *
237 *   b -> p0;
238 *   c -> p1;
239 * }
240 * @enddot
241 *
242 * Now perform a blocking operation with thread B.  Please note that thread A
243 * migrated now from processor 0 to processor 1 and thread C still executes on
244 * processor 1.
245 *
246 * @dot
247 * digraph {
248 *   node [style="filled"];
249 *   edge [dir="none"];
250 *
251 *   subgraph {
252 *     rank = same;
253 *
254 *     c [label="C (3)", fillcolor="green"];
255 *     a [label="A (4)", fillcolor="green"];
256 *     i [label="I (5)", fillcolor="red"];
257 *     j [label="J (5)", fillcolor="red"];
258 *     b [label="B (2)"];
259 *     c -> a;
260 *     i -> j;
261 *   }
262 *
263 *   subgraph {
264 *     rank = same;
265 *
266 *     p0 [label="PROCESSOR 0", shape="box"];
267 *     p1 [label="PROCESSOR 1", shape="box"];
268 *   }
269 *
270 *   a -> p0;
271 *   c -> p1;
272 * }
273 * @enddot
274 *
275 * @{
276 */
277
/**
 * @brief Handler which checks whether the scheduler context has a ready node.
 */
typedef bool ( *Scheduler_SMP_Has_ready )(
  Scheduler_Context *context
);

/**
 * @brief Handler which returns the highest ready node of the context.
 *
 * The @a node parameter is the node which triggered the request; its exact
 * use depends on the concrete scheduler implementation.
 */
typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *node
);

/**
 * @brief Handler which returns the lowest scheduled node of the context.
 *
 * The @a filter node may be used by the implementation to narrow the search.
 */
typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

/**
 * @brief Handler which extracts a node from a node set of the context.
 */
typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

/**
 * @brief Handler which inserts a node into a node set of the context using
 *   the given insert priority.
 */
typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   insert_priority
);

/**
 * @brief Handler which moves a node between the scheduled and ready sets.
 */
typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

/**
 * @brief Handler which asks for help for the thread via the given node.
 */
typedef bool ( *Scheduler_SMP_Ask_for_help )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Scheduler_Node    *node
);

/**
 * @brief Handler which updates the node to the new priority.
 */
typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

/**
 * @brief Handler which sets the affinity of the node.
 *
 * The interpretation of @a arg is implementation-defined.
 */
typedef void ( *Scheduler_SMP_Set_affinity )(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  void              *arg
);

/**
 * @brief Handler which enqueues the node with the given priority.
 */
typedef bool ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

/**
 * @brief Handler which allocates the processor of the victim node to the
 *   scheduled node.
 */
typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
);

/**
 * @brief Handler which registers an idle thread for a processor.
 */
typedef void ( *Scheduler_SMP_Register_idle )(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
);
344
345/**
346 * @brief Does nothing.
347 *
348 * @param context This parameter is unused.
349 * @param idle This parameter is unused.
350 * @param cpu This parameter is unused.
351 */
352static inline void _Scheduler_SMP_Do_nothing_register_idle(
353  Scheduler_Context *context,
354  Scheduler_Node    *idle,
355  Per_CPU_Control   *cpu
356)
357{
358  (void) context;
359  (void) idle;
360  (void) cpu;
361}
362
363/**
364 * @brief Checks if @a to_insert is less or equal than the priority of the chain node.
365 *
366 * @param to_insert The priority to compare.
367 * @param next The chain node to compare the priority of.
368 *
369 * @retval true @a to_insert is less or equal than the priority of @a next.
370 * @retval false @a to_insert is greater than the priority of @a next.
371 */
372static inline bool _Scheduler_SMP_Priority_less_equal(
373  const void       *to_insert,
374  const Chain_Node *next
375)
376{
377  const Priority_Control   *priority_to_insert;
378  const Scheduler_SMP_Node *node_next;
379
380  priority_to_insert = (const Priority_Control *) to_insert;
381  node_next = (const Scheduler_SMP_Node *) next;
382
383  return *priority_to_insert <= node_next->priority;
384}
385
386/**
387 * @brief Gets the scheduler smp context.
388 *
389 * @param context The context to cast to Scheduler_SMP_Context *.
390 *
391 * @return @a context cast to Scheduler_SMP_Context *.
392 */
393static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
394  Scheduler_Context *context
395)
396{
397  return (Scheduler_SMP_Context *) context;
398}
399
400/**
401 * @brief Initializes the scheduler smp context.
402 *
403 * @param[out] self The context to initialize.
404 */
405static inline void _Scheduler_SMP_Initialize(
406  Scheduler_SMP_Context *self
407)
408{
409  _Chain_Initialize_empty( &self->Scheduled );
410  _Chain_Initialize_empty( &self->Idle_threads );
411}
412
/**
 * @brief Gets the scheduler SMP node of the thread.
 *
 * The home scheduler node of the thread is downcast to the SMP node type.
 *
 * @param thread The thread to get the SMP node of.
 *
 * @return The scheduler SMP node of @a thread.
 */
static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}
426
427/**
428 * @brief Gets the scheduler smp node of the thread.
429 *
430 * @param thread The thread to get the smp node of.
431 *
432 * @return The scheduler smp node of @a thread.
433 */
434static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
435  Thread_Control *thread
436)
437{
438  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
439}
440
441/**
442 * @brief Gets the scheduler smp node.
443 *
444 * @param node The node to cast to Scheduler_SMP_Node *.
445 *
446 * @return @a node cast to Scheduler_SMP_Node *.
447 */
448static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
449  Scheduler_Node *node
450)
451{
452  return (Scheduler_SMP_Node *) node;
453}
454
455/**
456 * @brief Gets the state of the node.
457 *
458 * @param node The node to get the state of.
459 *
460 * @return The state of @a node.
461 */
462static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
463  const Scheduler_Node *node
464)
465{
466  return ( (const Scheduler_SMP_Node *) node )->state;
467}
468
469/**
470 * @brief Gets the priority of the node.
471 *
472 * @param node The node to get the priority of.
473 *
474 * @return The priority of @a node.
475 */
476static inline Priority_Control _Scheduler_SMP_Node_priority(
477  const Scheduler_Node *node
478)
479{
480  return ( (const Scheduler_SMP_Node *) node )->priority;
481}
482
483/**
484 * @brief Initializes the scheduler smp node.
485 *
486 * @param scheduler The scheduler instance.
487 * @param[out] node The node to initialize.
488 * @param thread The thread of the scheduler smp node.
489 * @param priority The priority to initialize @a node with.
490 */
491static inline void _Scheduler_SMP_Node_initialize(
492  const Scheduler_Control *scheduler,
493  Scheduler_SMP_Node      *node,
494  Thread_Control          *thread,
495  Priority_Control         priority
496)
497{
498  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
499  node->state = SCHEDULER_SMP_NODE_BLOCKED;
500  node->priority = priority;
501}
502
/**
 * @brief Updates the priority of the node to the new priority.
 *
 * Only the priority cached in the SMP node is updated; the base node is not
 * touched here.
 *
 * @param[out] node The node to update the priority of.
 * @param new_priority The new priority for @a node.
 */
static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}
516
517/**
518 * @brief Changes the state of the node to the given state.
519 *
520 * @param[out] node the node to change the state of.
521 * @param new_state The new state for @a node.
522 */
523static inline void _Scheduler_SMP_Node_change_state(
524  Scheduler_Node           *node,
525  Scheduler_SMP_Node_state  new_state
526)
527{
528  Scheduler_SMP_Node *the_node;
529
530  the_node = _Scheduler_SMP_Node_downcast( node );
531  the_node->state = new_state;
532}
533
534/**
535 * @brief Checks if the processor is owned by the given context.
536 *
537 * @param context The context to check whether @a cpu is owned by it.
538 * @param cpu The cpu to check whether it is owned by @a context.
539 *
540 * @retval true @a cpu is owned by @a context.
541 * @retval false @a cpu is not owned by @a context.
542 */
543static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
544  const Scheduler_Context *context,
545  const Per_CPU_Control   *cpu
546)
547{
548  return cpu->Scheduler.context == context;
549}
550
551/**
552 * @brief Gets The first idle thread of the given context.
553 *
554 * @param context The scheduler context to get the first idle thread from.
555 *
556 * @return The first idle thread of @a context.
557 */
558static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
559  Scheduler_Context *context
560)
561{
562  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
563  Thread_Control *idle = (Thread_Control *)
564    _Chain_Get_first_unprotected( &self->Idle_threads );
565
566  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
567
568  return idle;
569}
570
571/**
572 * @brief Releases the thread and adds it to the idle threads.
573 *
574 * @param[in, out] context The scheduler context instance.
575 * @param idle The thread to add to the idle threads.
576 */
577static inline void _Scheduler_SMP_Release_idle_thread(
578  Scheduler_Context *context,
579  Thread_Control    *idle
580)
581{
582  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
583
584  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
585}
586
/**
 * @brief Extracts the node of the idle thread.
 *
 * NOTE(review): the function name misspells "Extract" as "Exctract".  It is
 * kept as-is because callers elsewhere reference this exact name; renaming
 * would require a coordinated change across all users.
 *
 * @param[in, out] idle The thread to extract the node of.
 */
static inline void _Scheduler_SMP_Exctract_idle_thread(
  Thread_Control *idle
)
{
  _Chain_Extract_unprotected( &idle->Object.Node );
}
598
/**
 * @brief Allocates the cpu for the scheduled thread.
 *
 * Attempts to prevent migrations but does not take into account affinity.
 *
 * @param context The scheduler context instance.
 * @param scheduled The scheduled node that should be executed next.
 * @param victim If the heir is this node's thread, no processor is allocated.
 * @param[in, out] victim_cpu The cpu to allocate.
 */
static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  /* This code must run with interrupts disabled. */
  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      /*
       * The scheduled thread already executes on a processor of this
       * context: keep it there (avoiding a migration) and let the former
       * heir of that processor take over the victim's processor below.
       */
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      heir = scheduled_thread;
    }
  } else {
    /* Not currently executing anywhere: no migration to avoid. */
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}
645
646/**
647 * @brief Allocates the cpu for the scheduled thread.
648 *
649 * This method is slightly different from
650 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
651 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
652 * but does not take into account affinity.
653 *
654 * @param context This parameter is unused.
655 * @param scheduled The scheduled node whose thread should be executed next.
656 * @param victim This parameter is unused.
657 * @param[in, out] victim_cpu The cpu to allocate.
658 */
659static inline void _Scheduler_SMP_Allocate_processor_exact(
660  Scheduler_Context *context,
661  Scheduler_Node    *scheduled,
662  Scheduler_Node    *victim,
663  Per_CPU_Control   *victim_cpu
664)
665{
666  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
667  Per_CPU_Control *cpu_self = _Per_CPU_Get();
668
669  (void) context;
670  (void) victim;
671
672  _Thread_Set_CPU( scheduled_thread, victim_cpu );
673  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
674}
675
/**
 * @brief Allocates the cpu for the scheduled thread using the given
 *   allocation function.
 *
 * Marks the scheduled node as SCHEDULED before delegating the actual
 * processor allocation to @a allocate_processor.
 *
 * @param context The scheduler context instance.
 * @param scheduled The scheduled node that should be executed next.
 * @param victim If the heir is this node's thread, no processor is allocated.
 * @param[in, out] victim_cpu The cpu to allocate.
 * @param allocate_processor The function to use for the allocation of
 *   @a victim_cpu.
 */
static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *allocate_processor )( context, scheduled, victim, victim_cpu );
}
696
/**
 * @brief Preempts the victim's thread and allocates a cpu for the scheduled
 *   thread.
 *
 * @param context The scheduler context instance.
 * @param scheduled Node of the scheduled thread that is about to be executed.
 * @param[in, out] victim Node of the thread to preempt.
 * @param allocate_processor The function for allocation of a processor for
 *   the new thread.
 *
 * @return The preempted thread.
 */
static inline Thread_Control *_Scheduler_SMP_Preempt(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control   *victim_thread;
  ISR_lock_Context  scheduler_lock_context;
  Per_CPU_Control  *victim_cpu;

  victim_thread = _Scheduler_Node_get_user( victim );
  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  /* The thread scheduler lock protects the scheduler state of the victim. */
  _Thread_Scheduler_acquire_critical( victim_thread, &scheduler_lock_context );

  victim_cpu = _Thread_Get_CPU( victim_thread );

  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );

    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
      ISR_lock_Context per_cpu_lock_context;

      /*
       * The preempted thread has helping scheduler nodes: append it to the
       * per-cpu list of threads in need for help (presumably consumed
       * during thread dispatch — verify against the dispatch code).
       */
      _Per_CPU_Acquire( victim_cpu, &per_cpu_lock_context );
      _Chain_Append_unprotected(
        &victim_cpu->Threads_in_need_for_help,
        &victim_thread->Scheduler.Help_node
      );
      _Per_CPU_Release( victim_cpu, &per_cpu_lock_context );
    }
  }

  _Thread_Scheduler_release_critical( victim_thread, &scheduler_lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    victim,
    victim_cpu,
    allocate_processor
  );

  return victim_thread;
}
752
753/**
754 * @brief Returns the lowest member of the scheduled nodes.
755 *
756 * @param context The scheduler context instance.
757 * @param filter This parameter is unused.
758 *
759 * @return The lowest scheduled node.
760 */
761static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
762  Scheduler_Context *context,
763  Scheduler_Node    *filter
764)
765{
766  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
767  Chain_Control *scheduled = &self->Scheduled;
768  Scheduler_Node *lowest_scheduled =
769    (Scheduler_Node *) _Chain_Last( scheduled );
770
771  (void) filter;
772
773  _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
774  _Assert(
775    _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
776  );
777
778  return lowest_scheduled;
779}
780
/**
 * @brief Tries to schedule the given node.
 *
 * Schedules the node, or blocks it if that is necessary.
 *
 * @param context The scheduler context instance.
 * @param[in, out] node The node to insert into the scheduled nodes.
 * @param priority The priority of @a node.
 * @param[in, out] lowest_scheduled The lowest member of the scheduled nodes.
 * @param insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Priority_Control                  priority,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  action = _Scheduler_Try_to_schedule_node(
    context,
    node,
    _Scheduler_Node_get_idle( lowest_scheduled ),
    _Scheduler_SMP_Get_idle_thread
  );

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    /* Preempt the lowest scheduled node and take over its processor. */
    _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      allocate_processor
    );

    ( *insert_scheduled )( context, node, priority );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Release_idle_thread(
      context,
      lowest_scheduled,
      _Scheduler_SMP_Release_idle_thread
    );
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    /*
     * The node takes over the idle thread attached to the lowest scheduled
     * node; only node states and set membership change, no preemption.
     */
    _Scheduler_SMP_Node_change_state(
      lowest_scheduled,
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );

    ( *insert_scheduled )( context, node, priority );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Exchange_idle_thread(
      node,
      lowest_scheduled,
      _Scheduler_Node_get_idle( lowest_scheduled )
    );
  } else {
    /* The node cannot be scheduled right now: mark it blocked. */
    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
  }
}
852
853/**
854 * @brief Enqueues a node according to the specified order function.
855 *
856 * The node must not be in the scheduled state.
857 *
858 * @param context The scheduler instance context.
859 * @param[in, out] node The node to enqueue.
860 * @param priority The node insert priority.
861 * @param order The order function.
862 * @param insert_ready Function to insert a node into the set of ready
863 *   nodes.
864 * @param insert_scheduled Function to insert a node into the set of
865 *   scheduled nodes.
866 * @param move_from_scheduled_to_ready Function to move a node from the set
867 *   of scheduled nodes to the set of ready nodes.
868 * @param get_lowest_scheduled Function to select the node from the
869 *   scheduled nodes to replace.  It may not be possible to find one, in this
870 *   case a pointer must be returned so that the order functions returns false
871 *   if this pointer is passed as the second argument to the order function.
872 * @param allocate_processor Function to allocate a processor to a node
873 *   based on the rules of the scheduler.
874 */
875static inline bool _Scheduler_SMP_Enqueue(
876  Scheduler_Context                  *context,
877  Scheduler_Node                     *node,
878  Priority_Control                    insert_priority,
879  Chain_Node_order                    order,
880  Scheduler_SMP_Insert                insert_ready,
881  Scheduler_SMP_Insert                insert_scheduled,
882  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
883  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
884  Scheduler_SMP_Allocate_processor    allocate_processor
885)
886{
887  bool            needs_help;
888  Scheduler_Node *lowest_scheduled;
889
890  lowest_scheduled = ( *get_lowest_scheduled )( context, node );
891
892  if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
893    _Scheduler_SMP_Enqueue_to_scheduled(
894      context,
895      node,
896      insert_priority,
897      lowest_scheduled,
898      insert_scheduled,
899      move_from_scheduled_to_ready,
900      allocate_processor
901    );
902    needs_help = false;
903  } else {
904    ( *insert_ready )( context, node, insert_priority );
905    needs_help = true;
906  }
907
908  return needs_help;
909}
910
/**
 * @brief Enqueues a scheduled node according to the specified order
 * function.
 *
 * @param context The scheduler instance context.
 * @param[in, out] node The node to enqueue.
 * @param insert_priority The node insert priority.
 * @param order The order function.
 * @param extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param get_highest_ready Function to get the highest ready node.
 * @param insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 *
 * @retval false The node remained scheduled, or an idle exchange took place.
 * @retval true The node was preempted by the highest ready node and no idle
 *   thread was attached to it (the thread needs help).
 */
static inline bool _Scheduler_SMP_Enqueue_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *const node,
  Priority_Control                  insert_priority,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  /* Retry until the highest ready node can be scheduled or blocked. */
  while ( true ) {
    Scheduler_Node                   *highest_ready;
    Scheduler_Try_to_schedule_action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if (
      node->sticky_level > 0
        && ( *order )( &insert_priority, &highest_ready->Node.Chain )
    ) {
      /* The node still beats the highest ready node: keep it scheduled. */
      ( *insert_scheduled )( context, node, insert_priority );

      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
        Thread_Control   *owner;
        ISR_lock_Context  lock_context;

        owner = _Scheduler_Node_get_owner( node );
        _Thread_Scheduler_acquire_critical( owner, &lock_context );

        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
          /*
           * The owner is ready again: it no longer needs help, and the idle
           * thread attached to the node can be discarded.
           */
          _Thread_Scheduler_cancel_need_for_help(
            owner,
            _Thread_Get_CPU( owner )
          );
          _Scheduler_Discard_idle_thread(
            context,
            owner,
            node,
            _Scheduler_SMP_Release_idle_thread
          );
          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
        }

        _Thread_Scheduler_release_critical( owner, &lock_context );
      }

      return false;
    }

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      _Scheduler_Node_get_idle( node ),
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      Thread_Control *idle;

      /* The highest ready node preempts the node. */
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        allocate_processor
      );

      ( *insert_ready )( context, node, insert_priority );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      idle = _Scheduler_Release_idle_thread(
        context,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      return ( idle == NULL );
    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
      /* Swap the node states and hand the idle thread over. */
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_SCHEDULED
      );

      ( *insert_ready )( context, node, insert_priority );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      _Scheduler_Exchange_idle_thread(
        highest_ready,
        node,
        _Scheduler_Node_get_idle( node )
      );
      return false;
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      /* The highest ready node cannot be scheduled: block it and retry. */
      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }
}
1040
/**
 * @brief Extracts a scheduled node from the scheduled nodes.
 *
 * @param context This parameter is unused.
 * @param node The node to extract from the chain it belongs to.
 */
static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  (void) context;
  _Chain_Extract_unprotected( &node->Node.Chain );
}
1055
1056/**
1057 * @brief Schedules the highest ready node.
1058 *
1059 * @param context The scheduler context instance.
1060 * @param victim The node of the thread that is repressed by the newly scheduled thread.
1061 * @param victim_cpu The cpu to allocate.
1062 * @param extract_from_ready Function to extract a node from the set of
1063 *      ready nodes.
1064 * @param get_highest_ready Function to get the highest ready node.
1065 * @param move_from_ready_to_scheduled Function to move a node from the set
1066 *      of ready nodes to the set of scheduled nodes.
1067 * @param allocate_processor Function to allocate a processor to a node
1068 *      based on the rules of the scheduler.
1069 */
1070static inline void _Scheduler_SMP_Schedule_highest_ready(
1071  Scheduler_Context                *context,
1072  Scheduler_Node                   *victim,
1073  Per_CPU_Control                  *victim_cpu,
1074  Scheduler_SMP_Extract             extract_from_ready,
1075  Scheduler_SMP_Get_highest_ready   get_highest_ready,
1076  Scheduler_SMP_Move                move_from_ready_to_scheduled,
1077  Scheduler_SMP_Allocate_processor  allocate_processor
1078)
1079{
1080  Scheduler_Try_to_schedule_action action;
1081
1082  do {
1083    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
1084
1085    action = _Scheduler_Try_to_schedule_node(
1086      context,
1087      highest_ready,
1088      NULL,
1089      _Scheduler_SMP_Get_idle_thread
1090    );
1091
1092    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
1093      _Scheduler_SMP_Allocate_processor(
1094        context,
1095        highest_ready,
1096        victim,
1097        victim_cpu,
1098        allocate_processor
1099      );
1100
1101      ( *move_from_ready_to_scheduled )( context, highest_ready );
1102    } else {
1103      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1104
1105      _Scheduler_SMP_Node_change_state(
1106        highest_ready,
1107        SCHEDULER_SMP_NODE_BLOCKED
1108      );
1109
1110      ( *extract_from_ready )( context, highest_ready );
1111    }
1112  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1113}
1114
1115/**
1116 * @brief Schedules the highest ready node and preempts a currently executing one.
1117 *
1118 * @param context The scheduler context instance.
1119 * @param victim The node of the thread that is repressed by the newly scheduled thread.
1120 * @param victim_cpu The cpu to allocate.
1121 * @param extract_from_ready Function to extract a node from the set of
1122 *      ready nodes.
1123 * @param get_highest_ready Function to get the highest ready node.
1124 * @param move_from_ready_to_scheduled Function to move a node from the set
1125 *      of ready nodes to the set of scheduled nodes.
1126 * @param allocate_processor Function to allocate a processor to a node
1127 *      based on the rules of the scheduler.
1128 */
1129static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
1130  Scheduler_Context                *context,
1131  Scheduler_Node                   *victim,
1132  Per_CPU_Control                  *victim_cpu,
1133  Scheduler_SMP_Extract             extract_from_ready,
1134  Scheduler_SMP_Get_highest_ready   get_highest_ready,
1135  Scheduler_SMP_Move                move_from_ready_to_scheduled,
1136  Scheduler_SMP_Allocate_processor  allocate_processor
1137)
1138{
1139  Scheduler_Try_to_schedule_action action;
1140
1141  do {
1142    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
1143
1144    action = _Scheduler_Try_to_schedule_node(
1145      context,
1146      highest_ready,
1147      NULL,
1148      _Scheduler_SMP_Get_idle_thread
1149    );
1150
1151    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
1152      _Scheduler_SMP_Preempt(
1153        context,
1154        highest_ready,
1155        victim,
1156        allocate_processor
1157      );
1158
1159      ( *move_from_ready_to_scheduled )( context, highest_ready );
1160    } else {
1161      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1162
1163      _Scheduler_SMP_Node_change_state(
1164        highest_ready,
1165        SCHEDULER_SMP_NODE_BLOCKED
1166      );
1167
1168      ( *extract_from_ready )( context, highest_ready );
1169    }
1170  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1171}
1172
1173/**
1174 * @brief Blocks the thread.
1175 *
1176 * @param context The scheduler instance context.
1177 * @param[in, out] thread The thread of the scheduling operation.
1178 * @param[in, out] node The scheduler node of the thread to block.
1179 * @param extract_from_scheduled Function to extract a node from the set of
1180 *      scheduled nodes.
1181 * @param extract_from_ready Function to extract a node from the set of
1182 *      ready nodes.
1183 * @param get_highest_ready Function to get the highest ready node.
1184 * @param move_from_ready_to_scheduled Function to move a node from the set
1185 *      of ready nodes to the set of scheduled nodes.
1186 * @param allocate_processor Function to allocate a processor to a node
1187 *      based on the rules of the scheduler.
1188 */
1189static inline void _Scheduler_SMP_Block(
1190  Scheduler_Context                *context,
1191  Thread_Control                   *thread,
1192  Scheduler_Node                   *node,
1193  Scheduler_SMP_Extract             extract_from_scheduled,
1194  Scheduler_SMP_Extract             extract_from_ready,
1195  Scheduler_SMP_Get_highest_ready   get_highest_ready,
1196  Scheduler_SMP_Move                move_from_ready_to_scheduled,
1197  Scheduler_SMP_Allocate_processor  allocate_processor
1198)
1199{
1200  Scheduler_SMP_Node_state  node_state;
1201  Per_CPU_Control          *thread_cpu;
1202
1203  node_state = _Scheduler_SMP_Node_state( node );
1204
1205  thread_cpu = _Scheduler_Block_node(
1206    context,
1207    thread,
1208    node,
1209    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
1210    _Scheduler_SMP_Get_idle_thread
1211  );
1212
1213  if ( thread_cpu != NULL ) {
1214    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1215
1216    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1217      ( *extract_from_scheduled )( context, node );
1218      _Scheduler_SMP_Schedule_highest_ready(
1219        context,
1220        node,
1221        thread_cpu,
1222        extract_from_ready,
1223        get_highest_ready,
1224        move_from_ready_to_scheduled,
1225        allocate_processor
1226      );
1227    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1228      ( *extract_from_ready )( context, node );
1229    }
1230  }
1231}
1232
1233/**
1234 * @brief Unblocks the thread.
1235 *
1236 * @param context The scheduler instance context.
1237 * @param[in, out] thread The thread of the scheduling operation.
1238 * @param[in, out] node The scheduler node of the thread to block.
1239 * @param update Function to update the node's priority to the new value.
1240 * @param enqueue Function to insert a node with a priority in the ready queue
1241 *      of a context.
1242 */
1243static inline void _Scheduler_SMP_Unblock(
1244  Scheduler_Context     *context,
1245  Thread_Control        *thread,
1246  Scheduler_Node        *node,
1247  Scheduler_SMP_Update   update,
1248  Scheduler_SMP_Enqueue  enqueue
1249)
1250{
1251  Scheduler_SMP_Node_state  node_state;
1252  bool                      unblock;
1253
1254  node_state = _Scheduler_SMP_Node_state( node );
1255  unblock = _Scheduler_Unblock_node(
1256    context,
1257    thread,
1258    node,
1259    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
1260    _Scheduler_SMP_Release_idle_thread
1261  );
1262
1263  if ( unblock ) {
1264    Priority_Control priority;
1265    bool             needs_help;
1266
1267    priority = _Scheduler_Node_get_priority( node );
1268    priority = SCHEDULER_PRIORITY_PURIFY( priority );
1269
1270    if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
1271      ( *update )( context, node, priority );
1272    }
1273
1274    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1275      Priority_Control insert_priority;
1276
1277      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
1278      insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
1279      needs_help = ( *enqueue )( context, node, insert_priority );
1280    } else {
1281      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
1282      _Assert( node->sticky_level > 0 );
1283      _Assert( node->idle == NULL );
1284      needs_help = true;
1285    }
1286
1287    if ( needs_help ) {
1288      _Scheduler_Ask_for_help( thread );
1289    }
1290  }
1291}
1292
1293/**
1294 * @brief Updates the priority of the node and the position in the queues it
1295 * is in.
1296 *
1297 * This function firstly updates the priority of the node and then extracts
1298 * and reinserts it into the queue the node is part of using the given
1299 * functions.
1300 *
1301 * @param context The scheduler instance context.
1302 * @param thread The thread for the operation.
1303 * @param[in, out] node The node to update the priority of.
1304 * @param extract_from_ready Function to extract a node from the ready
1305 *      queue of the scheduler context.
1306 * @param update Function to update the priority of a node in the scheduler
1307 *      context.
1308 * @param enqueue Function to enqueue a node with a given priority.
1309 * @param enqueue_scheduled Function to enqueue a scheduled node.
1310 * @param ask_for_help Function to perform a help request.
1311 */
1312static inline void _Scheduler_SMP_Update_priority(
1313  Scheduler_Context          *context,
1314  Thread_Control             *thread,
1315  Scheduler_Node             *node,
1316  Scheduler_SMP_Extract       extract_from_ready,
1317  Scheduler_SMP_Update        update,
1318  Scheduler_SMP_Enqueue       enqueue,
1319  Scheduler_SMP_Enqueue       enqueue_scheduled,
1320  Scheduler_SMP_Ask_for_help  ask_for_help
1321)
1322{
1323  Priority_Control         priority;
1324  Priority_Control         insert_priority;
1325  Scheduler_SMP_Node_state node_state;
1326
1327  insert_priority = _Scheduler_Node_get_priority( node );
1328  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
1329
1330  if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
1331    if ( _Thread_Is_ready( thread ) ) {
1332      ( *ask_for_help )( context, thread, node );
1333    }
1334
1335    return;
1336  }
1337
1338  node_state = _Scheduler_SMP_Node_state( node );
1339
1340  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1341    _Scheduler_SMP_Extract_from_scheduled( context, node );
1342    ( *update )( context, node, priority );
1343    ( *enqueue_scheduled )( context, node, insert_priority );
1344  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1345    ( *extract_from_ready )( context, node );
1346    ( *update )( context, node, priority );
1347    ( *enqueue )( context, node, insert_priority );
1348  } else {
1349    ( *update )( context, node, priority );
1350
1351    if ( _Thread_Is_ready( thread ) ) {
1352      ( *ask_for_help )( context, thread, node );
1353    }
1354  }
1355}
1356
1357/**
1358 * @brief Performs a yield and asks for help if necessary.
1359 *
1360 * @param context The scheduler instance context.
1361 * @param thread The thread for the operation.
1362 * @param node The node of the thread that yields.
1363 * @param extract_from_ready Function to extract a node from the ready
1364 *      queue of the scheduler context.
1365 * @param enqueue Function to enqueue a node with a given priority.
1366 * @param enqueue_scheduled Function to enqueue a scheduled node.
1367 */
1368static inline void _Scheduler_SMP_Yield(
1369  Scheduler_Context     *context,
1370  Thread_Control        *thread,
1371  Scheduler_Node        *node,
1372  Scheduler_SMP_Extract  extract_from_ready,
1373  Scheduler_SMP_Enqueue  enqueue,
1374  Scheduler_SMP_Enqueue  enqueue_scheduled
1375)
1376{
1377  bool                     needs_help;
1378  Scheduler_SMP_Node_state node_state;
1379  Priority_Control         insert_priority;
1380
1381  node_state = _Scheduler_SMP_Node_state( node );
1382  insert_priority = _Scheduler_SMP_Node_priority( node );
1383  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
1384
1385  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1386    _Scheduler_SMP_Extract_from_scheduled( context, node );
1387    ( *enqueue_scheduled )( context, node, insert_priority );
1388    needs_help = false;
1389  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1390    ( *extract_from_ready )( context, node );
1391
1392    needs_help = ( *enqueue )( context, node, insert_priority );
1393  } else {
1394    needs_help = true;
1395  }
1396
1397  if ( needs_help ) {
1398    _Scheduler_Ask_for_help( thread );
1399  }
1400}
1401
1402/**
1403 * @brief Inserts the node with the given priority into the scheduled nodes.
1404 *
1405 * @param context The scheduler instance context.
1406 * @param node_to_insert The scheduled node to insert.
1407 * @param priority_to_insert The priority with which to insert the node.
1408 */
1409static inline void _Scheduler_SMP_Insert_scheduled(
1410  Scheduler_Context *context,
1411  Scheduler_Node    *node_to_insert,
1412  Priority_Control   priority_to_insert
1413)
1414{
1415  Scheduler_SMP_Context *self;
1416
1417  self = _Scheduler_SMP_Get_self( context );
1418
1419  _Chain_Insert_ordered_unprotected(
1420    &self->Scheduled,
1421    &node_to_insert->Node.Chain,
1422    &priority_to_insert,
1423    _Scheduler_SMP_Priority_less_equal
1424  );
1425}
1426
/**
 * @brief Asks for help.
 *
 * @param context The scheduler instance context.
 * @param thread The thread that asks for help.
 * @param[in, out] node The node of the thread that performs the ask for help
 *      operation.
 * @param order The order function.
 * @param insert_ready Function to insert a node into the set of ready
 *      nodes.
 * @param insert_scheduled Function to insert a node into the set of
 *      scheduled nodes.
 * @param move_from_scheduled_to_ready Function to move a node from the set
 *      of scheduled nodes to the set of ready nodes.
 * @param get_lowest_scheduled Function to select the node from the
 *      scheduled nodes to replace.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 *
 * @retval true The ask for help operation was successful.
 * @retval false The ask for help operation was not successful.
 */
static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Scheduler_Node                     *node,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  Scheduler_Node   *lowest_scheduled;
  ISR_lock_Context  lock_context;
  bool              success;

  if ( thread->Scheduler.pinned_scheduler != NULL ) {
    /*
     * Pinned threads are not allowed to ask for help.  Return success to break
     * the loop in _Thread_Ask_for_help() early.
     */
    return true;
  }

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Scheduler_SMP_Node_state node_state;

    node_state = _Scheduler_SMP_Node_state( node );

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( node );

      /*
       * The node beats the lowest scheduled node according to the order
       * function, so it may preempt the lowest scheduled node.
       */
      if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
        /*
         * Release the thread lock before the preemption; the scheduler state
         * of the thread is already settled at this point.
         */
        _Thread_Scheduler_cancel_need_for_help(
          thread,
          _Thread_Get_CPU( thread )
        );
        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
        _Thread_Scheduler_release_critical( thread, &lock_context );

        _Scheduler_SMP_Preempt(
          context,
          node,
          lowest_scheduled,
          allocate_processor
        );

        ( *insert_scheduled )( context, node, insert_priority );
        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

        _Scheduler_Release_idle_thread(
          context,
          lowest_scheduled,
          _Scheduler_SMP_Release_idle_thread
        );
        success = true;
      } else {
        /* Not eligible to preempt; the node just becomes ready. */
        _Thread_Scheduler_release_critical( thread, &lock_context );
        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
        ( *insert_ready )( context, node, insert_priority );
        success = false;
      }
    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      /* The node is already scheduled; no queue manipulation is needed. */
      _Thread_Scheduler_cancel_need_for_help(
        thread,
        _Thread_Get_CPU( thread )
      );
      _Scheduler_Discard_idle_thread(
        context,
        thread,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = true;
    } else {
      /* The node is ready in some other scheduler context; nothing to do. */
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = false;
    }
  } else {
    /* The thread is no longer in the ready state; the request is stale. */
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}
1542
1543/**
1544 * @brief Reconsiders help request.
1545 *
1546 * @param context The scheduler context instance.
1547 * @param thread The thread to reconsider the help request of.
1548 * @param[in, out] node The scheduler node of @a thread.
1549 * @param extract_from_ready Function to extract a node from the ready queue
1550 *      of the scheduler context.
1551 */
1552static inline void _Scheduler_SMP_Reconsider_help_request(
1553  Scheduler_Context     *context,
1554  Thread_Control        *thread,
1555  Scheduler_Node        *node,
1556  Scheduler_SMP_Extract  extract_from_ready
1557)
1558{
1559  ISR_lock_Context lock_context;
1560
1561  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1562
1563  if (
1564    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
1565      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
1566      && node->sticky_level == 1
1567  ) {
1568    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1569    ( *extract_from_ready )( context, node );
1570  }
1571
1572  _Thread_Scheduler_release_critical( thread, &lock_context );
1573}
1574
/**
 * @brief Withdraws the node.
 *
 * @param context The scheduler context instance.
 * @param[in, out] thread The thread to change to @a next_state.
 * @param[in, out] node The node to withdraw.
 * @param next_state The new state for @a thread.
 * @param extract_from_ready Function to extract a node from the ready queue
 *      of the scheduler context.
 * @param get_highest_ready Function to get the highest ready node.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *      of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Withdraw_node(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Thread_Scheduler_state            next_state,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  ISR_lock_Context         lock_context;
  Scheduler_SMP_Node_state node_state;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  /* Capture the state before it is forced to blocked. */
  node_state = _Scheduler_SMP_Node_state( node );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Per_CPU_Control *thread_cpu;

    /*
     * The processor owned by the node must be reassigned.  The thread lock is
     * released before the queue operations below.
     */
    thread_cpu = _Thread_Get_CPU( thread );
    _Scheduler_Thread_change_state( thread, next_state );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    _Scheduler_SMP_Extract_from_scheduled( context, node );
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      thread_cpu,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    /* The node owned no processor; just remove it from the ready queue. */
    _Thread_Scheduler_release_critical( thread, &lock_context );
    ( *extract_from_ready )( context, node );
  } else {
    /* Already blocked; nothing to extract. */
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    _Thread_Scheduler_release_critical( thread, &lock_context );
  }
}
1634
1635/**
1636 * @brief Starts the idle thread on the given processor.
1637 *
1638 * @param context The scheduler context instance.
1639 * @param[in, out] idle The idle thread to schedule.
1640 * @param cpu The processor for the idle thread.
1641 * @param register_idle Function to register the idle thread for a cpu.
1642 */
1643static inline void _Scheduler_SMP_Do_start_idle(
1644  Scheduler_Context           *context,
1645  Thread_Control              *idle,
1646  Per_CPU_Control             *cpu,
1647  Scheduler_SMP_Register_idle  register_idle
1648)
1649{
1650  Scheduler_SMP_Context *self;
1651  Scheduler_SMP_Node    *node;
1652
1653  self = _Scheduler_SMP_Get_self( context );
1654  node = _Scheduler_SMP_Thread_get_node( idle );
1655
1656  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
1657  node->state = SCHEDULER_SMP_NODE_SCHEDULED;
1658
1659  _Thread_Set_CPU( idle, cpu );
1660  ( *register_idle )( context, &node->Base, cpu );
1661  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
1662  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
1663}
1664
1665/**
1666 * @brief Adds the idle thread to the processor.
1667 *
1668 * @param context The scheduler context instance.
1669 * @param[in, out] idle The idle thread to add to the processor.
1670 * @param has_ready Function that checks if a given context has ready threads.
1671 * @param enqueue_scheduled Function to enqueue a scheduled node.
1672 * @param register_idle Function to register the idle thread for a cpu.
1673 */
1674static inline void _Scheduler_SMP_Add_processor(
1675  Scheduler_Context           *context,
1676  Thread_Control              *idle,
1677  Scheduler_SMP_Has_ready      has_ready,
1678  Scheduler_SMP_Enqueue        enqueue_scheduled,
1679  Scheduler_SMP_Register_idle  register_idle
1680)
1681{
1682  Scheduler_SMP_Context *self;
1683  Scheduler_Node        *node;
1684
1685  self = _Scheduler_SMP_Get_self( context );
1686  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1687  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
1688  node = _Thread_Scheduler_get_home_node( idle );
1689  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
1690  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
1691
1692  if ( ( *has_ready )( &self->Base ) ) {
1693    Priority_Control insert_priority;
1694
1695    insert_priority = _Scheduler_SMP_Node_priority( node );
1696    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
1697    ( *enqueue_scheduled )( &self->Base, node, insert_priority );
1698  } else {
1699    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
1700  }
1701}
1702
/**
 * @brief Removes an idle thread from the processor.
 *
 * @param context The scheduler context instance.
 * @param cpu The processor to remove from.
 * @param extract_from_ready Function to extract a node from the ready queue
 *      of the scheduler context.
 * @param enqueue Function to enqueue a node with a given priority.
 *
 * @return The idle thread of @a cpu.
 */
static inline Thread_Control *_Scheduler_SMP_Remove_processor(
  Scheduler_Context     *context,
  Per_CPU_Control       *cpu,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue
)
{
  Scheduler_SMP_Context *self;
  Chain_Node            *chain_node;
  Scheduler_Node        *victim_node;
  Thread_Control        *victim_user;
  Thread_Control        *victim_owner;
  Thread_Control        *idle;

  self = _Scheduler_SMP_Get_self( context );
  chain_node = _Chain_First( &self->Scheduled );

  /* Find the scheduled node whose user currently executes on @a cpu. */
  do {
    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
    victim_node = (Scheduler_Node *) chain_node;
    victim_user = _Scheduler_Node_get_user( victim_node );
    chain_node = _Chain_Next( chain_node );
  } while ( _Thread_Get_CPU( victim_user ) != cpu );

  _Scheduler_SMP_Extract_from_scheduled( context, victim_node );
  victim_owner = _Scheduler_Node_get_owner( victim_node );

  if ( !victim_owner->is_idle ) {
    Scheduler_Node *idle_node;

    /*
     * A normal thread occupies the processor.  Obtain an idle thread, let it
     * take over the processor via an exact allocation, and requeue the victim
     * so it may run elsewhere (unless the scheduled set became empty).
     */
    _Scheduler_Release_idle_thread(
      &self->Base,
      victim_node,
      _Scheduler_SMP_Release_idle_thread
    );
    idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
    idle_node = _Thread_Scheduler_get_home_node( idle );
    ( *extract_from_ready )( &self->Base, idle_node );
    _Scheduler_SMP_Preempt(
      &self->Base,
      idle_node,
      victim_node,
      _Scheduler_SMP_Allocate_processor_exact
    );

    if ( !_Chain_Is_empty( &self->Scheduled ) ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( victim_node );
      insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
      ( *enqueue )( context, victim_node, insert_priority );
    }
  } else {
    /* The processor already runs an idle thread; just take it out. */
    _Assert( victim_owner == victim_user );
    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
    idle = victim_owner;
    /*
     * NOTE(review): "Exctract" is the declared name of this helper elsewhere
     * in this file (a misspelling of "Extract"); renaming it requires
     * updating the definition as well.
     */
    _Scheduler_SMP_Exctract_idle_thread( idle );
  }

  return idle;
}
1775
1776/**
1777 * @brief Sets the affinity of the node.
1778 *
1779 * Also performs a reinsert into the queue the node is currently in.
1780 *
1781 * @param context The scheduler context instance.
1782 * @param thread The thread for the operation.
1783 * @param[in, out] node The node to set the affinity of.
1784 * @param arg The affinity for @a node.
1785 * @param set_affinity Function to set the affinity of a node.
1786 * @param extract_from_ready Function to extract a node from the ready queue
1787 *      of the scheduler context.
1788 * @param get_highest_ready Function to get the highest ready node.
1789 * @param move_from_ready_to_scheduled Function to move a node from the set
1790 *      of ready nodes to the set of scheduled nodes.
1791 * @param enqueue Function to enqueue a node with a given priority.
1792 * @param allocate_processor Function to allocate a processor to a node
1793 *      based on the rules of the scheduler.
1794 */
1795static inline void _Scheduler_SMP_Set_affinity(
1796  Scheduler_Context               *context,
1797  Thread_Control                  *thread,
1798  Scheduler_Node                  *node,
1799  void                            *arg,
1800  Scheduler_SMP_Set_affinity       set_affinity,
1801  Scheduler_SMP_Extract            extract_from_ready,
1802  Scheduler_SMP_Get_highest_ready  get_highest_ready,
1803  Scheduler_SMP_Move               move_from_ready_to_scheduled,
1804  Scheduler_SMP_Enqueue            enqueue,
1805  Scheduler_SMP_Allocate_processor allocate_processor
1806)
1807{
1808  Scheduler_SMP_Node_state node_state;
1809  Priority_Control         insert_priority;
1810
1811  node_state = _Scheduler_SMP_Node_state( node );
1812  insert_priority = _Scheduler_SMP_Node_priority( node );
1813  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
1814
1815  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1816    _Scheduler_SMP_Extract_from_scheduled( context, node );
1817    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
1818      context,
1819      node,
1820      _Thread_Get_CPU( thread ),
1821      extract_from_ready,
1822      get_highest_ready,
1823      move_from_ready_to_scheduled,
1824      allocate_processor
1825    );
1826    ( *set_affinity )( context, node, arg );
1827    ( *enqueue )( context, node, insert_priority );
1828  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1829    ( *extract_from_ready )( context, node );
1830    ( *set_affinity )( context, node, arg );
1831    ( *enqueue )( context, node, insert_priority );
1832  } else {
1833    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
1834    ( *set_affinity )( context, node, arg );
1835  }
1836}
1837
1838/** @} */
1839
1840#ifdef __cplusplus
1841}
1842#endif /* __cplusplus */
1843
1844#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */
Note: See TracBrowser for help on using the repository browser.