source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ 0c286e3

Last change on this file was 0c286e3, checked in by Sebastian Huber <sebastian.huber@…>, on 10/25/17 at 14:00:17

score: _Chain_Insert_ordered_unprotected()

Change the chain order relation to use a directly specified left hand
side value. This is similar to _RBTree_Insert_inline() and helps the
compiler to better optimize the code.

1/**
2 * @file
3 *
4 * @brief SMP Scheduler Implementation
5 *
6 * @ingroup ScoreSchedulerSMP
7 */
8
9/*
10 * Copyright (c) 2013, 2017 embedded brains GmbH.  All rights reserved.
11 *
12 *  embedded brains GmbH
13 *  Dornierstr. 4
14 *  82178 Puchheim
15 *  Germany
16 *  <rtems@embedded-brains.de>
17 *
18 * The license and distribution terms for this file may be
19 * found in the file LICENSE in this distribution or at
20 * http://www.rtems.org/license/LICENSE.
21 */
22
23#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25
26#include <rtems/score/schedulersmp.h>
27#include <rtems/score/assert.h>
28#include <rtems/score/chainimpl.h>
29#include <rtems/score/schedulersimpleimpl.h>
30#include <rtems/bspIo.h>
31
32#ifdef __cplusplus
33extern "C" {
34#endif /* __cplusplus */
35
36/**
37 * @addtogroup ScoreSchedulerSMP
38 *
39 * The scheduler nodes can be in three states
40 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
41 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
42 * - @ref SCHEDULER_SMP_NODE_READY.
43 *
44 * State transitions are triggered via basic operations
45 * - _Scheduler_SMP_Enqueue_ordered(),
46 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
47 * - _Scheduler_SMP_Block().
48 *
49 * @dot
50 * digraph {
51 *   node [style="filled"];
52 *
53 *   bs [label="BLOCKED"];
54 *   ss [label="SCHEDULED", fillcolor="green"];
55 *   rs [label="READY", fillcolor="red"];
56 *
57 *   edge [label="enqueue"];
58 *   edge [fontcolor="darkgreen", color="darkgreen"];
59 *
60 *   bs -> ss;
61 *
62 *   edge [fontcolor="red", color="red"];
63 *
64 *   bs -> rs;
65 *
66 *   edge [label="enqueue other"];
67 *
68 *   ss -> rs;
69 *
70 *   edge [label="block"];
71 *   edge [fontcolor="black", color="black"];
72 *
73 *   ss -> bs;
74 *   rs -> bs;
75 *
76 *   edge [label="block other"];
77 *   edge [fontcolor="darkgreen", color="darkgreen"];
78 *
79 *   rs -> ss;
80 * }
81 * @enddot
82 *
83 * During system initialization each processor of the scheduler instance starts
84 * with an idle thread assigned to it.  Let's have a look at an example with two
85 * idle threads I and J with priority 5.  We also have blocked threads A, B and
86 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
87 * with respect to the thread priority from left to right in the diagrams
88 * below.  The highest priority node (lowest priority number) is the leftmost
89 * node.  Since the processor assignment is independent of the thread priority,
90 * the processor a node is assigned to may change from one diagram to the next.
91 *
92 * @dot
93 * digraph {
94 *   node [style="filled"];
95 *   edge [dir="none"];
96 *   subgraph {
97 *     rank = same;
98 *
99 *     i [label="I (5)", fillcolor="green"];
100 *     j [label="J (5)", fillcolor="green"];
101 *     a [label="A (1)"];
102 *     b [label="B (2)"];
103 *     c [label="C (3)"];
104 *     i -> j;
105 *   }
106 *
107 *   subgraph {
108 *     rank = same;
109 *
110 *     p0 [label="PROCESSOR 0", shape="box"];
111 *     p1 [label="PROCESSOR 1", shape="box"];
112 *   }
113 *
114 *   i -> p0;
115 *   j -> p1;
116 * }
117 * @enddot
118 *
119 * Let's start A.  For this, an enqueue operation is performed.
120 *
121 * @dot
122 * digraph {
123 *   node [style="filled"];
124 *   edge [dir="none"];
125 *
126 *   subgraph {
127 *     rank = same;
128 *
129 *     i [label="I (5)", fillcolor="green"];
130 *     j [label="J (5)", fillcolor="red"];
131 *     a [label="A (1)", fillcolor="green"];
132 *     b [label="B (2)"];
133 *     c [label="C (3)"];
134 *     a -> i;
135 *   }
136 *
137 *   subgraph {
138 *     rank = same;
139 *
140 *     p0 [label="PROCESSOR 0", shape="box"];
141 *     p1 [label="PROCESSOR 1", shape="box"];
142 *   }
143 *
144 *   i -> p0;
145 *   a -> p1;
146 * }
147 * @enddot
148 *
149 * Let's start C.
150 *
151 * @dot
152 * digraph {
153 *   node [style="filled"];
154 *   edge [dir="none"];
155 *
156 *   subgraph {
157 *     rank = same;
158 *
159 *     a [label="A (1)", fillcolor="green"];
160 *     c [label="C (3)", fillcolor="green"];
161 *     i [label="I (5)", fillcolor="red"];
162 *     j [label="J (5)", fillcolor="red"];
163 *     b [label="B (2)"];
164 *     a -> c;
165 *     i -> j;
166 *   }
167 *
168 *   subgraph {
169 *     rank = same;
170 *
171 *     p0 [label="PROCESSOR 0", shape="box"];
172 *     p1 [label="PROCESSOR 1", shape="box"];
173 *   }
174 *
175 *   c -> p0;
176 *   a -> p1;
177 * }
178 * @enddot
179 *
180 * Let's start B.
181 *
182 * @dot
183 * digraph {
184 *   node [style="filled"];
185 *   edge [dir="none"];
186 *
187 *   subgraph {
188 *     rank = same;
189 *
190 *     a [label="A (1)", fillcolor="green"];
191 *     b [label="B (2)", fillcolor="green"];
192 *     c [label="C (3)", fillcolor="red"];
193 *     i [label="I (5)", fillcolor="red"];
194 *     j [label="J (5)", fillcolor="red"];
195 *     a -> b;
196 *     c -> i -> j;
197 *   }
198 *
199 *   subgraph {
200 *     rank = same;
201 *
202 *     p0 [label="PROCESSOR 0", shape="box"];
203 *     p1 [label="PROCESSOR 1", shape="box"];
204 *   }
205 *
206 *   b -> p0;
207 *   a -> p1;
208 * }
209 * @enddot
210 *
211 * Let's change the priority of thread A to 4.
212 *
213 * @dot
214 * digraph {
215 *   node [style="filled"];
216 *   edge [dir="none"];
217 *
218 *   subgraph {
219 *     rank = same;
220 *
221 *     b [label="B (2)", fillcolor="green"];
222 *     c [label="C (3)", fillcolor="green"];
223 *     a [label="A (4)", fillcolor="red"];
224 *     i [label="I (5)", fillcolor="red"];
225 *     j [label="J (5)", fillcolor="red"];
226 *     b -> c;
227 *     a -> i -> j;
228 *   }
229 *
230 *   subgraph {
231 *     rank = same;
232 *
233 *     p0 [label="PROCESSOR 0", shape="box"];
234 *     p1 [label="PROCESSOR 1", shape="box"];
235 *   }
236 *
237 *   b -> p0;
238 *   c -> p1;
239 * }
240 * @enddot
241 *
242 * Now perform a blocking operation with thread B.  Please note that thread A
243 * migrated now from processor 1 to processor 0 and thread C still executes on
244 * processor 1.
245 *
246 * @dot
247 * digraph {
248 *   node [style="filled"];
249 *   edge [dir="none"];
250 *
251 *   subgraph {
252 *     rank = same;
253 *
254 *     c [label="C (3)", fillcolor="green"];
255 *     a [label="A (4)", fillcolor="green"];
256 *     i [label="I (5)", fillcolor="red"];
257 *     j [label="J (5)", fillcolor="red"];
258 *     b [label="B (2)"];
259 *     c -> a;
260 *     i -> j;
261 *   }
262 *
263 *   subgraph {
264 *     rank = same;
265 *
266 *     p0 [label="PROCESSOR 0", shape="box"];
267 *     p1 [label="PROCESSOR 1", shape="box"];
268 *   }
269 *
270 *   a -> p0;
271 *   c -> p1;
272 * }
273 * @enddot
274 *
275 * @{
276 */
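
/*
 * A minimal sketch of how a scheduler back-end might compose the hook
 * functions declared below with the generic enqueue operation to trigger the
 * state transitions described above.  The helpers my_insert_ready() and
 * my_move_from_scheduled_to_ready() are hypothetical placeholders of the
 * Scheduler_SMP_Insert and Scheduler_SMP_Move types; the remaining functions
 * are defined in this file.
 *
 * @code
 * static bool my_scheduler_enqueue_fifo(
 *   Scheduler_Context *context,
 *   Scheduler_Node    *node
 * )
 * {
 *   // The node leaves the blocked (or ready) state and competes for a
 *   // processor.  The return value is true if the node could not preempt
 *   // the lowest scheduled node and was inserted into the ready set, i.e.
 *   // the thread may need help.
 *   return _Scheduler_SMP_Enqueue_ordered(
 *     context,
 *     node,
 *     _Scheduler_SMP_Insert_priority_fifo_order,
 *     my_insert_ready,
 *     _Scheduler_SMP_Insert_scheduled_fifo,
 *     my_move_from_scheduled_to_ready,
 *     _Scheduler_SMP_Get_lowest_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy
 *   );
 * }
 * @endcode
 */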
277
278typedef bool ( *Scheduler_SMP_Has_ready )(
279  Scheduler_Context *context
280);
281
282typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
283  Scheduler_Context *context,
284  Scheduler_Node    *node
285);
286
287typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
288  Scheduler_Context *context,
289  Scheduler_Node    *filter
290);
291
292typedef void ( *Scheduler_SMP_Extract )(
293  Scheduler_Context *context,
294  Scheduler_Node    *node_to_extract
295);
296
297typedef void ( *Scheduler_SMP_Insert )(
298  Scheduler_Context *context,
299  Scheduler_Node    *node_to_insert
300);
301
302typedef void ( *Scheduler_SMP_Move )(
303  Scheduler_Context *context,
304  Scheduler_Node    *node_to_move
305);
306
307typedef bool ( *Scheduler_SMP_Ask_for_help )(
308  Scheduler_Context *context,
309  Thread_Control    *thread,
310  Scheduler_Node    *node
311);
312
313typedef void ( *Scheduler_SMP_Update )(
314  Scheduler_Context *context,
315  Scheduler_Node    *node_to_update,
316  Priority_Control   new_priority
317);
318
319typedef void ( *Scheduler_SMP_Set_affinity )(
320  Scheduler_Context *context,
321  Scheduler_Node    *node,
322  void              *arg
323);
324
325typedef bool ( *Scheduler_SMP_Enqueue )(
326  Scheduler_Context *context,
327  Scheduler_Node    *node_to_enqueue
328);
329
330typedef void ( *Scheduler_SMP_Allocate_processor )(
331  Scheduler_Context *context,
332  Scheduler_Node    *scheduled,
333  Scheduler_Node    *victim,
334  Per_CPU_Control   *victim_cpu
335);
336
337typedef void ( *Scheduler_SMP_Register_idle )(
338  Scheduler_Context *context,
339  Scheduler_Node    *idle,
340  Per_CPU_Control   *cpu
341);
342
343static inline void _Scheduler_SMP_Do_nothing_register_idle(
344  Scheduler_Context *context,
345  Scheduler_Node    *idle,
346  Per_CPU_Control   *cpu
347)
348{
349  (void) context;
350  (void) idle;
351  (void) cpu;
352}
353
354static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
355  const void       *to_insert,
356  const Chain_Node *next
357)
358{
359  const Priority_Control   *priority_to_insert;
360  const Scheduler_SMP_Node *node_next;
361
362  priority_to_insert = (const Priority_Control *) to_insert;
363  node_next = (const Scheduler_SMP_Node *) next;
364
365  return *priority_to_insert <= node_next->priority;
366}
367
368static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
369  const void       *to_insert,
370  const Chain_Node *next
371)
372{
373  const Priority_Control   *priority_to_insert;
374  const Scheduler_SMP_Node *node_next;
375
376  priority_to_insert = (const Priority_Control *) to_insert;
377  node_next = (const Scheduler_SMP_Node *) next;
378
379  return *priority_to_insert < node_next->priority;
380}
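
/*
 * Note that the two order functions above differ only in the comparison
 * operator.  A node is inserted before the first chain node for which the
 * order function returns true, so with <= the new node is placed in front of
 * nodes of equal priority (LIFO behaviour among equal priorities), while with
 * < it is placed behind them (FIFO behaviour).  See
 * _Scheduler_SMP_Insert_scheduled_lifo() and
 * _Scheduler_SMP_Insert_scheduled_fifo() below for their use with
 * _Chain_Insert_ordered_unprotected().
 */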
381
382static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
383  Scheduler_Context *context
384)
385{
386  return (Scheduler_SMP_Context *) context;
387}
388
389static inline void _Scheduler_SMP_Initialize(
390  Scheduler_SMP_Context *self
391)
392{
393  _Chain_Initialize_empty( &self->Scheduled );
394  _Chain_Initialize_empty( &self->Idle_threads );
395}
396
397static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
398  Thread_Control *thread
399)
400{
401  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
402}
403
404static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
405  Thread_Control *thread
406)
407{
408  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
409}
410
411static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
412  Scheduler_Node *node
413)
414{
415  return (Scheduler_SMP_Node *) node;
416}
417
418static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
419  const Scheduler_Node *node
420)
421{
422  return ( (const Scheduler_SMP_Node *) node )->state;
423}
424
425static inline Priority_Control _Scheduler_SMP_Node_priority(
426  const Scheduler_Node *node
427)
428{
429  return ( (const Scheduler_SMP_Node *) node )->priority;
430}
431
432static inline void _Scheduler_SMP_Node_initialize(
433  const Scheduler_Control *scheduler,
434  Scheduler_SMP_Node      *node,
435  Thread_Control          *thread,
436  Priority_Control         priority
437)
438{
439  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
440  node->state = SCHEDULER_SMP_NODE_BLOCKED;
441  node->priority = priority;
442}
443
444static inline void _Scheduler_SMP_Node_update_priority(
445  Scheduler_SMP_Node *node,
446  Priority_Control    new_priority
447)
448{
449  node->priority = new_priority;
450}
451
452static inline void _Scheduler_SMP_Node_change_state(
453  Scheduler_Node           *node,
454  Scheduler_SMP_Node_state  new_state
455)
456{
457  Scheduler_SMP_Node *the_node;
458
459  the_node = _Scheduler_SMP_Node_downcast( node );
460  the_node->state = new_state;
461}
462
463static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
464  const Scheduler_Context *context,
465  const Per_CPU_Control   *cpu
466)
467{
468  return cpu->Scheduler.context == context;
469}
470
471static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
472  Scheduler_Context *context
473)
474{
475  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
476  Thread_Control *idle = (Thread_Control *)
477    _Chain_Get_first_unprotected( &self->Idle_threads );
478
479  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
480
481  return idle;
482}
483
484static inline void _Scheduler_SMP_Release_idle_thread(
485  Scheduler_Context *context,
486  Thread_Control    *idle
487)
488{
489  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
490
491  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
492}
493
494static inline void _Scheduler_SMP_Extract_idle_thread(
495  Thread_Control *idle
496)
497{
498  _Chain_Extract_unprotected( &idle->Object.Node );
499}
500
501static inline void _Scheduler_SMP_Allocate_processor_lazy(
502  Scheduler_Context *context,
503  Scheduler_Node    *scheduled,
504  Scheduler_Node    *victim,
505  Per_CPU_Control   *victim_cpu
506)
507{
508  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
509  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
510  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
511  Per_CPU_Control *cpu_self = _Per_CPU_Get();
512  Thread_Control *heir;
513
514  _Assert( _ISR_Get_level() != 0 );
515
516  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
517    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
518      heir = scheduled_cpu->heir;
519      _Thread_Dispatch_update_heir(
520        cpu_self,
521        scheduled_cpu,
522        scheduled_thread
523      );
524    } else {
525      /* We have to force a migration to our processor set */
526      heir = scheduled_thread;
527    }
528  } else {
529    heir = scheduled_thread;
530  }
531
532  if ( heir != victim_thread ) {
533    _Thread_Set_CPU( heir, victim_cpu );
534    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
535  }
536}
537
538/*
539 * This method is slightly different from
540 * _Scheduler_SMP_Allocate_processor_lazy() in that it always performs the
541 * requested allocation.  _Scheduler_SMP_Allocate_processor_lazy() attempts to
542 * prevent migrations but does not take affinity into account.
543 */
544static inline void _Scheduler_SMP_Allocate_processor_exact(
545  Scheduler_Context *context,
546  Scheduler_Node    *scheduled,
547  Scheduler_Node    *victim,
548  Per_CPU_Control   *victim_cpu
549)
550{
551  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
552  Per_CPU_Control *cpu_self = _Per_CPU_Get();
553
554  (void) context;
555  (void) victim;
556
557  _Thread_Set_CPU( scheduled_thread, victim_cpu );
558  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
559}
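
/*
 * An allocate_processor callback of this kind is passed to the generic
 * operations below, for example (a sketch, assuming a back-end with the
 * usual context, scheduled and victim variables in scope):
 *
 * @code
 * _Scheduler_SMP_Preempt(
 *   context,
 *   scheduled,
 *   victim,
 *   _Scheduler_SMP_Allocate_processor_exact
 * );
 * @endcode
 *
 * A back-end that honours processor affinities would typically use the exact
 * variant, while the lazy variant tries to avoid needless migrations when
 * affinities play no role.
 */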
560
561static inline void _Scheduler_SMP_Allocate_processor(
562  Scheduler_Context                *context,
563  Scheduler_Node                   *scheduled,
564  Scheduler_Node                   *victim,
565  Per_CPU_Control                  *victim_cpu,
566  Scheduler_SMP_Allocate_processor  allocate_processor
567)
568{
569  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
570  ( *allocate_processor )( context, scheduled, victim, victim_cpu );
571}
572
573static inline Thread_Control *_Scheduler_SMP_Preempt(
574  Scheduler_Context                *context,
575  Scheduler_Node                   *scheduled,
576  Scheduler_Node                   *victim,
577  Scheduler_SMP_Allocate_processor  allocate_processor
578)
579{
580  Thread_Control   *victim_thread;
581  ISR_lock_Context  lock_context;
582  Per_CPU_Control  *victim_cpu;
583
584  victim_thread = _Scheduler_Node_get_user( victim );
585  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
586
587  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
588
589  victim_cpu = _Thread_Get_CPU( victim_thread );
590
591  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
592    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
593
594    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
595      _Per_CPU_Acquire( victim_cpu );
596      _Chain_Append_unprotected(
597        &victim_cpu->Threads_in_need_for_help,
598        &victim_thread->Scheduler.Help_node
599      );
600      _Per_CPU_Release( victim_cpu );
601    }
602  }
603
604  _Thread_Scheduler_release_critical( victim_thread, &lock_context );
605
606  _Scheduler_SMP_Allocate_processor(
607    context,
608    scheduled,
609    victim,
610    victim_cpu,
611    allocate_processor
612  );
613
614  return victim_thread;
615}
616
617static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
618  Scheduler_Context *context,
619  Scheduler_Node    *filter
620)
621{
622  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
623  Chain_Control *scheduled = &self->Scheduled;
624  Scheduler_Node *lowest_scheduled =
625    (Scheduler_Node *) _Chain_Last( scheduled );
626
627  (void) filter;
628
629  _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
630  _Assert(
631    _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
632  );
633
634  return lowest_scheduled;
635}
636
637static inline void _Scheduler_SMP_Enqueue_to_scheduled(
638  Scheduler_Context                *context,
639  Scheduler_Node                   *node,
640  Scheduler_Node                   *lowest_scheduled,
641  Scheduler_SMP_Insert              insert_scheduled,
642  Scheduler_SMP_Move                move_from_scheduled_to_ready,
643  Scheduler_SMP_Allocate_processor  allocate_processor
644)
645{
646  Scheduler_Try_to_schedule_action action;
647
648  action = _Scheduler_Try_to_schedule_node(
649    context,
650    node,
651    _Scheduler_Node_get_idle( lowest_scheduled ),
652    _Scheduler_SMP_Get_idle_thread
653  );
654
655  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
656    _Scheduler_SMP_Preempt(
657      context,
658      node,
659      lowest_scheduled,
660      allocate_processor
661    );
662
663    ( *insert_scheduled )( context, node );
664    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
665
666    _Scheduler_Release_idle_thread(
667      context,
668      lowest_scheduled,
669      _Scheduler_SMP_Release_idle_thread
670    );
671  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
672    _Scheduler_SMP_Node_change_state(
673      lowest_scheduled,
674      SCHEDULER_SMP_NODE_READY
675    );
676    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
677
678    ( *insert_scheduled )( context, node );
679    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
680
681    _Scheduler_Exchange_idle_thread(
682      node,
683      lowest_scheduled,
684      _Scheduler_Node_get_idle( lowest_scheduled )
685    );
686  } else {
687    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
688    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
689  }
690}
691
692/**
693 * @brief Enqueues a node according to the specified order function.
694 *
695 * The node must not be in the scheduled state.
696 *
697 * @param[in] context The scheduler instance context.
698 * @param[in] node The node to enqueue.
699 * @param[in] order The order function.
700 * @param[in] insert_ready Function to insert a node into the set of ready
701 *   nodes.
702 * @param[in] insert_scheduled Function to insert a node into the set of
703 *   scheduled nodes.
704 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
705 *   of scheduled nodes to the set of ready nodes.
706 * @param[in] get_lowest_scheduled Function to select the node from the
707 *   scheduled nodes to replace.  It may not be possible to find one; in this
708 *   case a pointer must be returned so that the order function returns false
709 *   if this pointer is passed as the second argument to the order function.
710 * @param[in] allocate_processor Function to allocate a processor to a node
711 *   based on the rules of the scheduler.
712 */
713static inline bool _Scheduler_SMP_Enqueue_ordered(
714  Scheduler_Context                  *context,
715  Scheduler_Node                     *node,
716  Chain_Node_order                    order,
717  Scheduler_SMP_Insert                insert_ready,
718  Scheduler_SMP_Insert                insert_scheduled,
719  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
720  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
721  Scheduler_SMP_Allocate_processor    allocate_processor
722)
723{
724  bool              needs_help;
725  Scheduler_Node   *lowest_scheduled;
726  Priority_Control  node_priority;
727
728  lowest_scheduled = ( *get_lowest_scheduled )( context, node );
729  node_priority = _Scheduler_SMP_Node_priority( node );
730
731  if ( ( *order )( &node_priority, &lowest_scheduled->Node.Chain ) ) {
732    _Scheduler_SMP_Enqueue_to_scheduled(
733      context,
734      node,
735      lowest_scheduled,
736      insert_scheduled,
737      move_from_scheduled_to_ready,
738      allocate_processor
739    );
740    needs_help = false;
741  } else {
742    ( *insert_ready )( context, node );
743    needs_help = true;
744  }
745
746  return needs_help;
747}
748
749/**
750 * @brief Enqueues a scheduled node according to the specified order
751 * function.
752 *
753 * @param[in] context The scheduler instance context.
754 * @param[in] node The node to enqueue.
755 * @param[in] order The order function.
756 * @param[in] extract_from_ready Function to extract a node from the set of
757 *   ready nodes.
758 * @param[in] get_highest_ready Function to get the highest ready node.
759 * @param[in] insert_ready Function to insert a node into the set of ready
760 *   nodes.
761 * @param[in] insert_scheduled Function to insert a node into the set of
762 *   scheduled nodes.
763 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
764 *   of ready nodes to the set of scheduled nodes.
765 * @param[in] allocate_processor Function to allocate a processor to a node
766 *   based on the rules of the scheduler.
767 */
768static inline bool _Scheduler_SMP_Enqueue_scheduled_ordered(
769  Scheduler_Context                *context,
770  Scheduler_Node                   *node,
771  Chain_Node_order                  order,
772  Scheduler_SMP_Extract             extract_from_ready,
773  Scheduler_SMP_Get_highest_ready   get_highest_ready,
774  Scheduler_SMP_Insert              insert_ready,
775  Scheduler_SMP_Insert              insert_scheduled,
776  Scheduler_SMP_Move                move_from_ready_to_scheduled,
777  Scheduler_SMP_Allocate_processor  allocate_processor
778)
779{
780  while ( true ) {
781    Scheduler_Node                   *highest_ready;
782    Scheduler_Try_to_schedule_action  action;
783    Priority_Control                  node_priority;
784
785    highest_ready = ( *get_highest_ready )( context, node );
786    node_priority = _Scheduler_SMP_Node_priority( node );
787
788    /*
789     * The node has been extracted from the scheduled chain.  We have to place
790     * it now on the scheduled or ready set.
791     */
792    if (
793      node->sticky_level > 0
794        && ( *order )( &node_priority, &highest_ready->Node.Chain )
795    ) {
796      ( *insert_scheduled )( context, node );
797
798      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
799        Thread_Control   *owner;
800        ISR_lock_Context  lock_context;
801
802        owner = _Scheduler_Node_get_owner( node );
803        _Thread_Scheduler_acquire_critical( owner, &lock_context );
804
805        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
806          _Thread_Scheduler_cancel_need_for_help(
807            owner,
808            _Thread_Get_CPU( owner )
809          );
810          _Scheduler_Discard_idle_thread(
811            context,
812            owner,
813            node,
814            _Scheduler_SMP_Release_idle_thread
815          );
816          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
817        }
818
819        _Thread_Scheduler_release_critical( owner, &lock_context );
820      }
821
822      return false;
823    }
824
825    action = _Scheduler_Try_to_schedule_node(
826      context,
827      highest_ready,
828      _Scheduler_Node_get_idle( node ),
829      _Scheduler_SMP_Get_idle_thread
830    );
831
832    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
833      Thread_Control *idle;
834
835      _Scheduler_SMP_Preempt(
836        context,
837        highest_ready,
838        node,
839        allocate_processor
840      );
841
842      ( *insert_ready )( context, node );
843      ( *move_from_ready_to_scheduled )( context, highest_ready );
844
845      idle = _Scheduler_Release_idle_thread(
846        context,
847        node,
848        _Scheduler_SMP_Release_idle_thread
849      );
850      return ( idle == NULL );
851    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
852      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
853      _Scheduler_SMP_Node_change_state(
854        highest_ready,
855        SCHEDULER_SMP_NODE_SCHEDULED
856      );
857
858      ( *insert_ready )( context, node );
859      ( *move_from_ready_to_scheduled )( context, highest_ready );
860
861      _Scheduler_Exchange_idle_thread(
862        highest_ready,
863        node,
864        _Scheduler_Node_get_idle( node )
865      );
866      return false;
867    } else {
868      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
869
870      _Scheduler_SMP_Node_change_state(
871        highest_ready,
872        SCHEDULER_SMP_NODE_BLOCKED
873      );
874
875      ( *extract_from_ready )( context, highest_ready );
876    }
877  }
878}
879
880static inline void _Scheduler_SMP_Extract_from_scheduled(
881  Scheduler_Node *node
882)
883{
884  _Chain_Extract_unprotected( &node->Node.Chain );
885}
886
887static inline void _Scheduler_SMP_Schedule_highest_ready(
888  Scheduler_Context                *context,
889  Scheduler_Node                   *victim,
890  Per_CPU_Control                  *victim_cpu,
891  Scheduler_SMP_Extract             extract_from_ready,
892  Scheduler_SMP_Get_highest_ready   get_highest_ready,
893  Scheduler_SMP_Move                move_from_ready_to_scheduled,
894  Scheduler_SMP_Allocate_processor  allocate_processor
895)
896{
897  Scheduler_Try_to_schedule_action action;
898
899  do {
900    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
901
902    action = _Scheduler_Try_to_schedule_node(
903      context,
904      highest_ready,
905      NULL,
906      _Scheduler_SMP_Get_idle_thread
907    );
908
909    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
910      _Scheduler_SMP_Allocate_processor(
911        context,
912        highest_ready,
913        victim,
914        victim_cpu,
915        allocate_processor
916      );
917
918      ( *move_from_ready_to_scheduled )( context, highest_ready );
919    } else {
920      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
921
922      _Scheduler_SMP_Node_change_state(
923        highest_ready,
924        SCHEDULER_SMP_NODE_BLOCKED
925      );
926
927      ( *extract_from_ready )( context, highest_ready );
928    }
929  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
930}
931
932static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
933  Scheduler_Context                *context,
934  Scheduler_Node                   *victim,
935  Per_CPU_Control                  *victim_cpu,
936  Scheduler_SMP_Extract             extract_from_ready,
937  Scheduler_SMP_Get_highest_ready   get_highest_ready,
938  Scheduler_SMP_Move                move_from_ready_to_scheduled,
939  Scheduler_SMP_Allocate_processor  allocate_processor
940)
941{
942  Scheduler_Try_to_schedule_action action;
943
944  do {
945    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
946
947    action = _Scheduler_Try_to_schedule_node(
948      context,
949      highest_ready,
950      NULL,
951      _Scheduler_SMP_Get_idle_thread
952    );
953
954    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
955      _Scheduler_SMP_Preempt(
956        context,
957        highest_ready,
958        victim,
959        allocate_processor
960      );
961
962      ( *move_from_ready_to_scheduled )( context, highest_ready );
963    } else {
964      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
965
966      _Scheduler_SMP_Node_change_state(
967        highest_ready,
968        SCHEDULER_SMP_NODE_BLOCKED
969      );
970
971      ( *extract_from_ready )( context, highest_ready );
972    }
973  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
974}
975
976/**
977 * @brief Blocks a thread.
978 *
979 * @param[in] context The scheduler instance context.
980 * @param[in] thread The thread of the scheduling operation.
981 * @param[in] node The scheduler node of the thread to block.
982 * @param[in] extract_from_ready Function to extract a node from the set of
983 *   ready nodes.
984 * @param[in] get_highest_ready Function to get the highest ready node.
985 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
986 *   of ready nodes to the set of scheduled nodes.
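 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.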
987 */
988static inline void _Scheduler_SMP_Block(
989  Scheduler_Context                *context,
990  Thread_Control                   *thread,
991  Scheduler_Node                   *node,
992  Scheduler_SMP_Extract             extract_from_ready,
993  Scheduler_SMP_Get_highest_ready   get_highest_ready,
994  Scheduler_SMP_Move                move_from_ready_to_scheduled,
995  Scheduler_SMP_Allocate_processor  allocate_processor
996)
997{
998  Scheduler_SMP_Node_state  node_state;
999  Per_CPU_Control          *thread_cpu;
1000
1001  node_state = _Scheduler_SMP_Node_state( node );
1002
1003  thread_cpu = _Scheduler_Block_node(
1004    context,
1005    thread,
1006    node,
1007    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
1008    _Scheduler_SMP_Get_idle_thread
1009  );
1010
1011  if ( thread_cpu != NULL ) {
1012    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1013
1014    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1015      _Scheduler_SMP_Extract_from_scheduled( node );
1016      _Scheduler_SMP_Schedule_highest_ready(
1017        context,
1018        node,
1019        thread_cpu,
1020        extract_from_ready,
1021        get_highest_ready,
1022        move_from_ready_to_scheduled,
1023        allocate_processor
1024      );
1025    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1026      ( *extract_from_ready )( context, node );
1027    }
1028  }
1029}
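
/*
 * A minimal sketch of a back-end blocking operation built on
 * _Scheduler_SMP_Block().  The my_*() callbacks are hypothetical
 * scheduler-specific ready-set helpers and are not declared in this file.
 *
 * @code
 * static void my_scheduler_block(
 *   Scheduler_Context *context,
 *   Thread_Control    *thread,
 *   Scheduler_Node    *node
 * )
 * {
 *   // If the node was scheduled, the highest ready node takes over the
 *   // vacated processor; if it was only ready, it is simply extracted.
 *   _Scheduler_SMP_Block(
 *     context,
 *     thread,
 *     node,
 *     my_extract_from_ready,
 *     my_get_highest_ready,
 *     my_move_from_ready_to_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy
 *   );
 * }
 * @endcode
 */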
1030
1031static inline void _Scheduler_SMP_Unblock(
1032  Scheduler_Context     *context,
1033  Thread_Control        *thread,
1034  Scheduler_Node        *node,
1035  Scheduler_SMP_Update   update,
1036  Scheduler_SMP_Enqueue  enqueue_fifo
1037)
1038{
1039  Scheduler_SMP_Node_state  node_state;
1040  bool                      unblock;
1041
1042  node_state = _Scheduler_SMP_Node_state( node );
1043  unblock = _Scheduler_Unblock_node(
1044    context,
1045    thread,
1046    node,
1047    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
1048    _Scheduler_SMP_Release_idle_thread
1049  );
1050
1051  if ( unblock ) {
1052    Priority_Control new_priority;
1053    bool             prepend_it;
1054    bool             needs_help;
1055
1056    new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
1057    (void) prepend_it;
1058
1059    if ( new_priority != _Scheduler_SMP_Node_priority( node ) ) {
1060      ( *update )( context, node, new_priority );
1061    }
1062
1063    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1064      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
1065
1066      needs_help = ( *enqueue_fifo )( context, node );
1067    } else {
1068      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
1069      _Assert( node->sticky_level > 0 );
1070      _Assert( node->idle == NULL );
1071      needs_help = true;
1072    }
1073
1074    if ( needs_help ) {
1075      _Scheduler_Ask_for_help( thread );
1076    }
1077  }
1078}
1079
1080static inline void _Scheduler_SMP_Update_priority(
1081  Scheduler_Context          *context,
1082  Thread_Control             *thread,
1083  Scheduler_Node             *node,
1084  Scheduler_SMP_Extract       extract_from_ready,
1085  Scheduler_SMP_Update        update,
1086  Scheduler_SMP_Enqueue       enqueue_fifo,
1087  Scheduler_SMP_Enqueue       enqueue_lifo,
1088  Scheduler_SMP_Enqueue       enqueue_scheduled_fifo,
1089  Scheduler_SMP_Enqueue       enqueue_scheduled_lifo,
1090  Scheduler_SMP_Ask_for_help  ask_for_help
1091)
1092{
1093  Priority_Control         new_priority;
1094  bool                     prepend_it;
1095  Scheduler_SMP_Node_state node_state;
1096
1097  new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
1098
1099  if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
1100    if ( _Thread_Is_ready( thread ) ) {
1101      ( *ask_for_help )( context, thread, node );
1102    }
1103
1104    return;
1105  }
1106
1107  node_state = _Scheduler_SMP_Node_state( node );
1108
1109  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1110    _Scheduler_SMP_Extract_from_scheduled( node );
1111
1112    ( *update )( context, node, new_priority );
1113
1114    if ( prepend_it ) {
1115      ( *enqueue_scheduled_lifo )( context, node );
1116    } else {
1117      ( *enqueue_scheduled_fifo )( context, node );
1118    }
1119  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1120    ( *extract_from_ready )( context, node );
1121
1122    ( *update )( context, node, new_priority );
1123
1124    if ( prepend_it ) {
1125      ( *enqueue_lifo )( context, node );
1126    } else {
1127      ( *enqueue_fifo )( context, node );
1128    }
1129  } else {
1130    ( *update )( context, node, new_priority );
1131
1132    if ( _Thread_Is_ready( thread ) ) {
1133      ( *ask_for_help )( context, thread, node );
1134    }
1135  }
1136}
1137
1138static inline void _Scheduler_SMP_Yield(
1139  Scheduler_Context     *context,
1140  Thread_Control        *thread,
1141  Scheduler_Node        *node,
1142  Scheduler_SMP_Extract  extract_from_ready,
1143  Scheduler_SMP_Enqueue  enqueue_fifo,
1144  Scheduler_SMP_Enqueue  enqueue_scheduled_fifo
1145)
1146{
1147  bool                     needs_help;
1148  Scheduler_SMP_Node_state node_state;
1149
1150  node_state = _Scheduler_SMP_Node_state( node );
1151
1152  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1153    _Scheduler_SMP_Extract_from_scheduled( node );
1154    ( *enqueue_scheduled_fifo )( context, node );
1155    needs_help = false;
1156  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1157    ( *extract_from_ready )( context, node );
1158
1159    needs_help = ( *enqueue_fifo )( context, node );
1160  } else {
1161    needs_help = true;
1162  }
1163
1164  if ( needs_help ) {
1165    _Scheduler_Ask_for_help( thread );
1166  }
1167}
1168
1169static inline void _Scheduler_SMP_Insert_scheduled_lifo(
1170  Scheduler_Context *context,
1171  Scheduler_Node    *node_to_insert
1172)
1173{
1174  Scheduler_SMP_Context *self;
1175  Priority_Control       priority_to_insert;
1176
1177  self = _Scheduler_SMP_Get_self( context );
1178  priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
1179
1180  _Chain_Insert_ordered_unprotected(
1181    &self->Scheduled,
1182    &node_to_insert->Node.Chain,
1183    &priority_to_insert,
1184    _Scheduler_SMP_Insert_priority_lifo_order
1185  );
1186}
1187
1188static inline void _Scheduler_SMP_Insert_scheduled_fifo(
1189  Scheduler_Context *context,
1190  Scheduler_Node    *node_to_insert
1191)
1192{
1193  Scheduler_SMP_Context *self;
1194  Priority_Control       priority_to_insert;
1195
1196  self = _Scheduler_SMP_Get_self( context );
1197  priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
1198
1199  _Chain_Insert_ordered_unprotected(
1200    &self->Scheduled,
1201    &node_to_insert->Node.Chain,
1202    &priority_to_insert,
1203    _Scheduler_SMP_Insert_priority_fifo_order
1204  );
1205}
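
/*
 * A back-end's insert_ready callback typically mirrors the two helpers above
 * but targets the scheduler-specific ready set.  The My_Scheduler_Context
 * type and its Ready chain below are assumptions for the sake of the sketch.
 *
 * @code
 * typedef struct {
 *   Scheduler_SMP_Context Base;
 *   Chain_Control         Ready;
 * } My_Scheduler_Context;
 *
 * static void my_insert_ready_fifo(
 *   Scheduler_Context *context,
 *   Scheduler_Node    *node_to_insert
 * )
 * {
 *   My_Scheduler_Context *self = (My_Scheduler_Context *) context;
 *   Priority_Control      priority_to_insert;
 *
 *   priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
 *
 *   _Chain_Insert_ordered_unprotected(
 *     &self->Ready,
 *     &node_to_insert->Node.Chain,
 *     &priority_to_insert,
 *     _Scheduler_SMP_Insert_priority_fifo_order
 *   );
 * }
 * @endcode
 */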
1206
1207static inline bool _Scheduler_SMP_Ask_for_help(
1208  Scheduler_Context                  *context,
1209  Thread_Control                     *thread,
1210  Scheduler_Node                     *node,
1211  Chain_Node_order                    order,
1212  Scheduler_SMP_Insert                insert_ready,
1213  Scheduler_SMP_Insert                insert_scheduled,
1214  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
1215  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
1216  Scheduler_SMP_Allocate_processor    allocate_processor
1217)
1218{
1219  Scheduler_Node   *lowest_scheduled;
1220  ISR_lock_Context  lock_context;
1221  bool              success;
1222
1223  lowest_scheduled = ( *get_lowest_scheduled )( context, node );
1224
1225  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1226
1227  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
1228    Scheduler_SMP_Node_state node_state;
1229
1230    node_state = _Scheduler_SMP_Node_state( node );
1231
1232    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1233      Priority_Control node_priority;
1234
1235      node_priority = _Scheduler_SMP_Node_priority( node );
1236
1237      if ( ( *order )( &node_priority, &lowest_scheduled->Node.Chain ) ) {
1238        _Thread_Scheduler_cancel_need_for_help(
1239          thread,
1240          _Thread_Get_CPU( thread )
1241        );
1242        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1243        _Thread_Scheduler_release_critical( thread, &lock_context );
1244
1245        _Scheduler_SMP_Preempt(
1246          context,
1247          node,
1248          lowest_scheduled,
1249          allocate_processor
1250        );
1251
1252        ( *insert_scheduled )( context, node );
1253        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
1254
1255        _Scheduler_Release_idle_thread(
1256          context,
1257          lowest_scheduled,
1258          _Scheduler_SMP_Release_idle_thread
1259        );
1260        success = true;
1261      } else {
1262        _Thread_Scheduler_release_critical( thread, &lock_context );
1263        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
1264        ( *insert_ready )( context, node );
1265        success = false;
1266      }
1267    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1268      _Thread_Scheduler_cancel_need_for_help(
1269        thread,
1270        _Thread_Get_CPU( thread )
1271      );
1272      _Scheduler_Discard_idle_thread(
1273        context,
1274        thread,
1275        node,
1276        _Scheduler_SMP_Release_idle_thread
1277      );
1278      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1279      _Thread_Scheduler_release_critical( thread, &lock_context );
1280      success = true;
1281    } else {
1282      _Thread_Scheduler_release_critical( thread, &lock_context );
1283      success = false;
1284    }
1285  } else {
1286    _Thread_Scheduler_release_critical( thread, &lock_context );
1287    success = false;
1288  }
1289
1290  return success;
1291}
1292
1293static inline void _Scheduler_SMP_Reconsider_help_request(
1294  Scheduler_Context     *context,
1295  Thread_Control        *thread,
1296  Scheduler_Node        *node,
1297  Scheduler_SMP_Extract  extract_from_ready
1298)
1299{
1300  ISR_lock_Context lock_context;
1301
1302  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1303
1304  if (
1305    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
1306      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
1307      && node->sticky_level == 1
1308  ) {
1309    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1310    ( *extract_from_ready )( context, node );
1311  }
1312
1313  _Thread_Scheduler_release_critical( thread, &lock_context );
1314}
1315
1316static inline void _Scheduler_SMP_Withdraw_node(
1317  Scheduler_Context                *context,
1318  Thread_Control                   *thread,
1319  Scheduler_Node                   *node,
1320  Thread_Scheduler_state            next_state,
1321  Scheduler_SMP_Extract             extract_from_ready,
1322  Scheduler_SMP_Get_highest_ready   get_highest_ready,
1323  Scheduler_SMP_Move                move_from_ready_to_scheduled,
1324  Scheduler_SMP_Allocate_processor  allocate_processor
1325)
1326{
1327  ISR_lock_Context         lock_context;
1328  Scheduler_SMP_Node_state node_state;
1329
1330  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1331
1332  node_state = _Scheduler_SMP_Node_state( node );
1333  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1334
1335  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1336    Per_CPU_Control *thread_cpu;
1337
1338    thread_cpu = _Thread_Get_CPU( thread );
1339    _Scheduler_Thread_change_state( thread, next_state );
1340    _Thread_Scheduler_release_critical( thread, &lock_context );
1341
1342    _Scheduler_SMP_Extract_from_scheduled( node );
1343    _Scheduler_SMP_Schedule_highest_ready(
1344      context,
1345      node,
1346      thread_cpu,
1347      extract_from_ready,
1348      get_highest_ready,
1349      move_from_ready_to_scheduled,
1350      allocate_processor
1351    );
1352  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1353    _Thread_Scheduler_release_critical( thread, &lock_context );
1354    ( *extract_from_ready )( context, node );
1355  } else {
1356    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
1357    _Thread_Scheduler_release_critical( thread, &lock_context );
1358  }
1359}
1360
1361static inline void _Scheduler_SMP_Do_start_idle(
1362  Scheduler_Context           *context,
1363  Thread_Control              *idle,
1364  Per_CPU_Control             *cpu,
1365  Scheduler_SMP_Register_idle  register_idle
1366)
1367{
1368  Scheduler_SMP_Context *self;
1369  Scheduler_SMP_Node    *node;
1370
1371  self = _Scheduler_SMP_Get_self( context );
1372  node = _Scheduler_SMP_Thread_get_node( idle );
1373
1374  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
1375  node->state = SCHEDULER_SMP_NODE_SCHEDULED;
1376
1377  _Thread_Set_CPU( idle, cpu );
1378  ( *register_idle )( context, &node->Base, cpu );
1379  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
1380  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
1381}
1382
1383static inline void _Scheduler_SMP_Add_processor(
1384  Scheduler_Context           *context,
1385  Thread_Control              *idle,
1386  Scheduler_SMP_Has_ready      has_ready,
1387  Scheduler_SMP_Enqueue        enqueue_scheduled_fifo,
1388  Scheduler_SMP_Register_idle  register_idle
1389)
1390{
1391  Scheduler_SMP_Context *self;
1392  Scheduler_Node        *node;
1393
1394  self = _Scheduler_SMP_Get_self( context );
1395  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1396  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
1397  node = _Thread_Scheduler_get_home_node( idle );
1398  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
1399  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
1400
1401  if ( ( *has_ready )( &self->Base ) ) {
1402    ( *enqueue_scheduled_fifo )( &self->Base, node );
1403  } else {
1404    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
1405  }
1406}
1407
1408static inline Thread_Control *_Scheduler_SMP_Remove_processor(
1409  Scheduler_Context     *context,
1410  Per_CPU_Control       *cpu,
1411  Scheduler_SMP_Extract  extract_from_ready,
1412  Scheduler_SMP_Enqueue  enqueue_fifo
1413)
1414{
1415  Scheduler_SMP_Context *self;
1416  Chain_Node            *chain_node;
1417  Scheduler_Node        *victim_node;
1418  Thread_Control        *victim_user;
1419  Thread_Control        *victim_owner;
1420  Thread_Control        *idle;
1421
1422  self = _Scheduler_SMP_Get_self( context );
1423  chain_node = _Chain_First( &self->Scheduled );
1424
1425  do {
1426    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
1427    victim_node = (Scheduler_Node *) chain_node;
1428    victim_user = _Scheduler_Node_get_user( victim_node );
1429    chain_node = _Chain_Next( chain_node );
1430  } while ( _Thread_Get_CPU( victim_user ) != cpu );
1431
1432  _Scheduler_SMP_Extract_from_scheduled( victim_node );
1433  victim_owner = _Scheduler_Node_get_owner( victim_node );
1434
1435  if ( !victim_owner->is_idle ) {
1436    Scheduler_Node *idle_node;
1437
1438    _Scheduler_Release_idle_thread(
1439      &self->Base,
1440      victim_node,
1441      _Scheduler_SMP_Release_idle_thread
1442    );
1443    idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
1444    idle_node = _Thread_Scheduler_get_home_node( idle );
1445    ( *extract_from_ready )( &self->Base, idle_node );
1446    _Scheduler_SMP_Preempt(
1447      &self->Base,
1448      idle_node,
1449      victim_node,
1450      _Scheduler_SMP_Allocate_processor_exact
1451    );
1452
1453    if ( !_Chain_Is_empty( &self->Scheduled ) ) {
1454      ( *enqueue_fifo )( context, victim_node );
1455    }
1456  } else {
1457    _Assert( victim_owner == victim_user );
1458    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
1459    idle = victim_owner;
1460    _Scheduler_SMP_Extract_idle_thread( idle );
1461  }
1462
1463  return idle;
1464}
1465
1466static inline void _Scheduler_SMP_Set_affinity(
1467  Scheduler_Context               *context,
1468  Thread_Control                  *thread,
1469  Scheduler_Node                  *node,
1470  void                            *arg,
1471  Scheduler_SMP_Set_affinity       set_affinity,
1472  Scheduler_SMP_Extract            extract_from_ready,
1473  Scheduler_SMP_Get_highest_ready  get_highest_ready,
1474  Scheduler_SMP_Move               move_from_ready_to_scheduled,
1475  Scheduler_SMP_Enqueue            enqueue_fifo,
1476  Scheduler_SMP_Allocate_processor allocate_processor
1477)
1478{
1479  Scheduler_SMP_Node_state node_state;
1480
1481  node_state = _Scheduler_SMP_Node_state( node );
1482
1483  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1484    _Scheduler_SMP_Extract_from_scheduled( node );
1485    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
1486      context,
1487      node,
1488      _Thread_Get_CPU( thread ),
1489      extract_from_ready,
1490      get_highest_ready,
1491      move_from_ready_to_scheduled,
1492      allocate_processor
1493    );
1494    ( *set_affinity )( context, node, arg );
1495    ( *enqueue_fifo )( context, node );
1496  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1497    ( *extract_from_ready )( context, node );
1498    ( *set_affinity )( context, node, arg );
1499    ( *enqueue_fifo )( context, node );
1500  } else {
1501    ( *set_affinity )( context, node, arg );
1502  }
1503}
1504
1505/** @} */
1506
1507#ifdef __cplusplus
1508}
1509#endif /* __cplusplus */
1510
1511#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */