source: rtems/cpukit/include/rtems/score/schedulersmpimpl.h @ 3aad9d9b

Last change: 3aad9d9b, checked in by Sebastian Huber <sebastian.huber@…> on 09/03/18 at 07:31:19

score: Generalize SMP scheduler block support

Add extract from scheduled function to the _Scheduler_SMP_Block()
operation. This allows a scheduler implementation to do extra work in
case a scheduled node is blocked.

/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013, 2017 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>
#include <rtems/bspIo.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue(),
 * - _Scheduler_SMP_Enqueue_scheduled(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance starts
 * with an idle thread assigned to it.  Let's have a look at an example with two
 * idle threads I and J with priority 5.  We also have blocked threads A, B, and
 * C with priorities 1, 2, and 3 respectively.  The scheduler nodes are ordered
 * by thread priority from left to right in the diagrams below.  The highest
 * priority node (lowest priority number) is the leftmost node.  Since the
 * processor assignment is independent of the thread priority, the processor
 * indices may move from one diagram to the next.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
120 *
121 * @dot
122 * digraph {
123 *   node [style="filled"];
124 *   edge [dir="none"];
125 *
126 *   subgraph {
127 *     rank = same;
128 *
129 *     i [label="I (5)", fillcolor="green"];
130 *     j [label="J (5)", fillcolor="red"];
131 *     a [label="A (1)", fillcolor="green"];
132 *     b [label="B (2)"];
133 *     c [label="C (3)"];
134 *     a -> i;
135 *   }
136 *
137 *   subgraph {
138 *     rank = same;
139 *
140 *     p0 [label="PROCESSOR 0", shape="box"];
141 *     p1 [label="PROCESSOR 1", shape="box"];
142 *   }
143 *
144 *   i -> p0;
145 *   a -> p1;
146 * }
147 * @enddot
148 *
 * Let's start C.
150 *
151 * @dot
152 * digraph {
153 *   node [style="filled"];
154 *   edge [dir="none"];
155 *
156 *   subgraph {
157 *     rank = same;
158 *
159 *     a [label="A (1)", fillcolor="green"];
160 *     c [label="C (3)", fillcolor="green"];
161 *     i [label="I (5)", fillcolor="red"];
162 *     j [label="J (5)", fillcolor="red"];
163 *     b [label="B (2)"];
164 *     a -> c;
165 *     i -> j;
166 *   }
167 *
168 *   subgraph {
169 *     rank = same;
170 *
171 *     p0 [label="PROCESSOR 0", shape="box"];
172 *     p1 [label="PROCESSOR 1", shape="box"];
173 *   }
174 *
175 *   c -> p0;
176 *   a -> p1;
177 * }
178 * @enddot
179 *
 * Let's start B.
181 *
182 * @dot
183 * digraph {
184 *   node [style="filled"];
185 *   edge [dir="none"];
186 *
187 *   subgraph {
188 *     rank = same;
189 *
190 *     a [label="A (1)", fillcolor="green"];
191 *     b [label="B (2)", fillcolor="green"];
192 *     c [label="C (3)", fillcolor="red"];
193 *     i [label="I (5)", fillcolor="red"];
194 *     j [label="J (5)", fillcolor="red"];
195 *     a -> b;
196 *     c -> i -> j;
197 *   }
198 *
199 *   subgraph {
200 *     rank = same;
201 *
202 *     p0 [label="PROCESSOR 0", shape="box"];
203 *     p1 [label="PROCESSOR 1", shape="box"];
204 *   }
205 *
206 *   b -> p0;
207 *   a -> p1;
208 * }
209 * @enddot
210 *
 * Let's change the priority of thread A to 4.
212 *
213 * @dot
214 * digraph {
215 *   node [style="filled"];
216 *   edge [dir="none"];
217 *
218 *   subgraph {
219 *     rank = same;
220 *
221 *     b [label="B (2)", fillcolor="green"];
222 *     c [label="C (3)", fillcolor="green"];
223 *     a [label="A (4)", fillcolor="red"];
224 *     i [label="I (5)", fillcolor="red"];
225 *     j [label="J (5)", fillcolor="red"];
226 *     b -> c;
227 *     a -> i -> j;
228 *   }
229 *
230 *   subgraph {
231 *     rank = same;
232 *
233 *     p0 [label="PROCESSOR 0", shape="box"];
234 *     p1 [label="PROCESSOR 1", shape="box"];
235 *   }
236 *
237 *   b -> p0;
238 *   c -> p1;
239 * }
240 * @enddot
241 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * now migrated from processor 1 to processor 0 and thread C still executes on
 * processor 1.
245 *
246 * @dot
247 * digraph {
248 *   node [style="filled"];
249 *   edge [dir="none"];
250 *
251 *   subgraph {
252 *     rank = same;
253 *
254 *     c [label="C (3)", fillcolor="green"];
255 *     a [label="A (4)", fillcolor="green"];
256 *     i [label="I (5)", fillcolor="red"];
257 *     j [label="J (5)", fillcolor="red"];
258 *     b [label="B (2)"];
259 *     c -> a;
260 *     i -> j;
261 *   }
262 *
263 *   subgraph {
264 *     rank = same;
265 *
266 *     p0 [label="PROCESSOR 0", shape="box"];
267 *     p1 [label="PROCESSOR 1", shape="box"];
268 *   }
269 *
270 *   a -> p0;
271 *   c -> p1;
272 * }
273 * @enddot
274 *
275 * @{
276 */
277
278typedef bool ( *Scheduler_SMP_Has_ready )(
279  Scheduler_Context *context
280);
281
282typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
283  Scheduler_Context *context,
284  Scheduler_Node    *node
285);
286
287typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
288  Scheduler_Context *context,
289  Scheduler_Node    *filter
290);
291
292typedef void ( *Scheduler_SMP_Extract )(
293  Scheduler_Context *context,
294  Scheduler_Node    *node_to_extract
295);
296
297typedef void ( *Scheduler_SMP_Insert )(
298  Scheduler_Context *context,
299  Scheduler_Node    *node_to_insert,
300  Priority_Control   insert_priority
301);
302
303typedef void ( *Scheduler_SMP_Move )(
304  Scheduler_Context *context,
305  Scheduler_Node    *node_to_move
306);
307
308typedef bool ( *Scheduler_SMP_Ask_for_help )(
309  Scheduler_Context *context,
310  Thread_Control    *thread,
311  Scheduler_Node    *node
312);
313
314typedef void ( *Scheduler_SMP_Update )(
315  Scheduler_Context *context,
316  Scheduler_Node    *node_to_update,
317  Priority_Control   new_priority
318);
319
320typedef void ( *Scheduler_SMP_Set_affinity )(
321  Scheduler_Context *context,
322  Scheduler_Node    *node,
323  void              *arg
324);
325
326typedef bool ( *Scheduler_SMP_Enqueue )(
327  Scheduler_Context *context,
328  Scheduler_Node    *node_to_enqueue,
329  Priority_Control   priority
330);
331
332typedef void ( *Scheduler_SMP_Allocate_processor )(
333  Scheduler_Context *context,
334  Scheduler_Node    *scheduled,
335  Scheduler_Node    *victim,
336  Per_CPU_Control   *victim_cpu
337);
338
339typedef void ( *Scheduler_SMP_Register_idle )(
340  Scheduler_Context *context,
341  Scheduler_Node    *idle,
342  Per_CPU_Control   *cpu
343);
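
/*
 * A concrete SMP scheduler (for example the simple, priority, or EDF SMP
 * scheduler) provides implementations of the operations defined above and
 * passes them to the _Scheduler_SMP_*() template functions below.  This
 * lets each scheduler choose its own ready-set data structure (chain,
 * priority bit map, red-black tree) while reusing the common state machine.
 */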

static inline void _Scheduler_SMP_Do_nothing_register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
)
{
  (void) context;
  (void) idle;
  (void) cpu;
}

static inline bool _Scheduler_SMP_Priority_less_equal(
  const void       *to_insert,
  const Chain_Node *next
)
{
  const Priority_Control   *priority_to_insert;
  const Scheduler_SMP_Node *node_next;

  priority_to_insert = (const Priority_Control *) to_insert;
  node_next = (const Scheduler_SMP_Node *) next;

  return *priority_to_insert <= node_next->priority;
}
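
/*
 * A minimal sketch of how the order function above interacts with insert
 * priorities, assuming the SCHEDULER_PRIORITY_APPEND() convention used
 * throughout this file (the least significant bit of the mapped priority
 * acts as an append flag):
 *
 *   Priority_Control prepend_it = SCHEDULER_PRIORITY_PURIFY( p );
 *   Priority_Control append_it = SCHEDULER_PRIORITY_APPEND( p );
 *
 * Among nodes with priority p, prepend_it satisfies "<=" and is therefore
 * inserted before them (LIFO placement), while append_it compares greater
 * and is inserted after them (FIFO placement).
 */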

static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
  _Chain_Initialize_empty( &self->Idle_threads );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->state;
}

static inline Priority_Control _Scheduler_SMP_Node_priority(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->priority;
}

static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node      *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}
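
/*
 * A typical use of the initialization function above is a concrete
 * scheduler's node initialization operation.  This sketch is modeled on
 * the Simple SMP scheduler; the wrapper name follows that scheduler's
 * naming convention:
 *
 *   void _Scheduler_simple_SMP_Node_initialize(
 *     const Scheduler_Control *scheduler,
 *     Scheduler_Node          *node,
 *     Thread_Control          *the_thread,
 *     Priority_Control         priority
 *   )
 *   {
 *     Scheduler_SMP_Node *smp_node;
 *
 *     smp_node = _Scheduler_SMP_Node_downcast( node );
 *     _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
 *   }
 */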

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_Node           *node,
  Scheduler_SMP_Node_state  new_state
)
{
  Scheduler_SMP_Node *the_node;

  the_node = _Scheduler_SMP_Node_downcast( node );
  the_node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->Scheduler.context == context;
}

static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
  Scheduler_Context *context
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *idle = (Thread_Control *)
    _Chain_Get_first_unprotected( &self->Idle_threads );

  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );

  return idle;
}

static inline void _Scheduler_SMP_Release_idle_thread(
  Scheduler_Context *context,
  Thread_Control    *idle
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
}

static inline void _Scheduler_SMP_Extract_idle_thread(
  Thread_Control *idle
)
{
  _Chain_Extract_unprotected( &idle->Object.Node );
}

static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      heir = scheduled_thread;
    }
  } else {
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}

/*
 * This method is slightly different from
 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
 * but does not take into account affinity.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;
  (void) victim;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *allocate_processor )( context, scheduled, victim, victim_cpu );
}

static inline Thread_Control *_Scheduler_SMP_Preempt(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control   *victim_thread;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *victim_cpu;

  victim_thread = _Scheduler_Node_get_user( victim );
  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );

  victim_cpu = _Thread_Get_CPU( victim_thread );

  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );

    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
      _Per_CPU_Acquire( victim_cpu );
      _Chain_Append_unprotected(
        &victim_cpu->Threads_in_need_for_help,
        &victim_thread->Scheduler.Help_node
      );
      _Per_CPU_Release( victim_cpu );
    }
  }

  _Thread_Scheduler_release_critical( victim_thread, &lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    victim,
    victim_cpu,
    allocate_processor
  );

  return victim_thread;
}

static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Chain_Control *scheduled = &self->Scheduled;
  Scheduler_Node *lowest_scheduled =
    (Scheduler_Node *) _Chain_Last( scheduled );

  (void) filter;

  _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
  _Assert(
    _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
  );

  return lowest_scheduled;
}

static inline void _Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Priority_Control                  priority,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  action = _Scheduler_Try_to_schedule_node(
    context,
    node,
    _Scheduler_Node_get_idle( lowest_scheduled ),
    _Scheduler_SMP_Get_idle_thread
  );

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      allocate_processor
    );

    ( *insert_scheduled )( context, node, priority );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Release_idle_thread(
      context,
      lowest_scheduled,
      _Scheduler_SMP_Release_idle_thread
    );
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    _Scheduler_SMP_Node_change_state(
      lowest_scheduled,
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );

    ( *insert_scheduled )( context, node, priority );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Exchange_idle_thread(
      node,
      lowest_scheduled,
      _Scheduler_Node_get_idle( lowest_scheduled )
    );
  } else {
    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
  }
}

/**
 * @brief Enqueues a node according to the specified order function.
 *
 * The node must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] priority The node insert priority.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param[in] get_lowest_scheduled Function to select the node from the
 *   scheduled nodes to replace.  It may not be possible to find one; in this
 *   case, a pointer must be returned such that the order function returns
 *   false when this pointer is passed as its second argument.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline bool _Scheduler_SMP_Enqueue(
  Scheduler_Context                  *context,
  Scheduler_Node                     *node,
  Priority_Control                    insert_priority,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  bool            needs_help;
  Scheduler_Node *lowest_scheduled;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      insert_priority,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      allocate_processor
    );
    needs_help = false;
  } else {
    ( *insert_ready )( context, node, insert_priority );
    needs_help = true;
  }

  return needs_help;
}
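
/*
 * How a concrete scheduler binds these callbacks is best seen in a thin
 * wrapper.  This sketch is modeled on the Simple SMP scheduler; the
 * _Scheduler_simple_SMP_*() helpers stand for that scheduler's ready-set
 * operations:
 *
 *   static bool _Scheduler_simple_SMP_Enqueue(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Priority_Control   insert_priority
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue(
 *       context,
 *       node,
 *       insert_priority,
 *       _Scheduler_SMP_Priority_less_equal,
 *       _Scheduler_simple_SMP_Insert_ready,
 *       _Scheduler_SMP_Insert_scheduled,
 *       _Scheduler_simple_SMP_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */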

/**
 * @brief Enqueues a scheduled node according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] order The order function.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline bool _Scheduler_SMP_Enqueue_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *const node,
  Priority_Control                  insert_priority,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  while ( true ) {
    Scheduler_Node                   *highest_ready;
    Scheduler_Try_to_schedule_action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if (
      node->sticky_level > 0
        && ( *order )( &insert_priority, &highest_ready->Node.Chain )
    ) {
      ( *insert_scheduled )( context, node, insert_priority );

      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
        Thread_Control   *owner;
        ISR_lock_Context  lock_context;

        owner = _Scheduler_Node_get_owner( node );
        _Thread_Scheduler_acquire_critical( owner, &lock_context );

        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
          _Thread_Scheduler_cancel_need_for_help(
            owner,
            _Thread_Get_CPU( owner )
          );
          _Scheduler_Discard_idle_thread(
            context,
            owner,
            node,
            _Scheduler_SMP_Release_idle_thread
          );
          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
        }

        _Thread_Scheduler_release_critical( owner, &lock_context );
      }

      return false;
    }

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      _Scheduler_Node_get_idle( node ),
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      Thread_Control *idle;

      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        allocate_processor
      );

      ( *insert_ready )( context, node, insert_priority );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      idle = _Scheduler_Release_idle_thread(
        context,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      return ( idle == NULL );
    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_SCHEDULED
      );

      ( *insert_ready )( context, node, insert_priority );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      _Scheduler_Exchange_idle_thread(
        highest_ready,
        node,
        _Scheduler_Node_get_idle( node )
      );
      return false;
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }
}

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  (void) context;
  _Chain_Extract_unprotected( &node->Node.Chain );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        victim,
        victim_cpu,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}

static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        victim,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] node The scheduler node of the thread to block.
 * @param[in] extract_from_scheduled Function to extract a node from the set of
 *   scheduled nodes.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Node_state  node_state;
  Per_CPU_Control          *thread_cpu;

  node_state = _Scheduler_SMP_Node_state( node );

  thread_cpu = _Scheduler_Block_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Get_idle_thread
  );

  if ( thread_cpu != NULL ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      ( *extract_from_scheduled )( context, node );
      _Scheduler_SMP_Schedule_highest_ready(
        context,
        node,
        thread_cpu,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor
      );
    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
      ( *extract_from_ready )( context, node );
    }
  }
}
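
/*
 * A sketch of a concrete block operation wired to the template above,
 * modeled on the Simple SMP scheduler (the _Scheduler_simple_SMP_*()
 * helpers stand for that scheduler's ready-set operations):
 *
 *   void _Scheduler_simple_SMP_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *     _Scheduler_SMP_Block(
 *       context,
 *       thread,
 *       node,
 *       _Scheduler_SMP_Extract_from_scheduled,
 *       _Scheduler_simple_SMP_Extract_from_ready,
 *       _Scheduler_simple_SMP_Get_highest_ready,
 *       _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */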

static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Update   update,
  Scheduler_SMP_Enqueue  enqueue
)
{
  Scheduler_SMP_Node_state  node_state;
  bool                      unblock;

  node_state = _Scheduler_SMP_Node_state( node );
  unblock = _Scheduler_Unblock_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Release_idle_thread
  );

  if ( unblock ) {
    Priority_Control priority;
    bool             needs_help;

    priority = _Scheduler_Node_get_priority( node );
    priority = SCHEDULER_PRIORITY_PURIFY( priority );

    if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
      ( *update )( context, node, priority );
    }

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
      needs_help = ( *enqueue )( context, node, insert_priority );
    } else {
      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
      _Assert( node->sticky_level > 0 );
      _Assert( node->idle == NULL );
      needs_help = true;
    }

    if ( needs_help ) {
      _Scheduler_Ask_for_help( thread );
    }
  }
}

static inline void _Scheduler_SMP_Update_priority(
  Scheduler_Context          *context,
  Thread_Control             *thread,
  Scheduler_Node             *node,
  Scheduler_SMP_Extract       extract_from_ready,
  Scheduler_SMP_Update        update,
  Scheduler_SMP_Enqueue       enqueue,
  Scheduler_SMP_Enqueue       enqueue_scheduled,
  Scheduler_SMP_Ask_for_help  ask_for_help
)
{
  Priority_Control         priority;
  Priority_Control         insert_priority;
  Scheduler_SMP_Node_state node_state;

  insert_priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );

  if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }

    return;
  }

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    ( *update )( context, node, priority );
    ( *enqueue_scheduled )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *update )( context, node, priority );
    ( *enqueue )( context, node, insert_priority );
  } else {
    ( *update )( context, node, priority );

    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }
  }
}

static inline void _Scheduler_SMP_Yield(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue,
  Scheduler_SMP_Enqueue  enqueue_scheduled
)
{
  bool                     needs_help;
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    ( *enqueue_scheduled )( context, node, insert_priority );
    needs_help = false;
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );

    needs_help = ( *enqueue )( context, node, insert_priority );
  } else {
    needs_help = true;
  }

  if ( needs_help ) {
    _Scheduler_Ask_for_help( thread );
  }
}

static inline void _Scheduler_SMP_Insert_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   priority_to_insert
)
{
  Scheduler_SMP_Context *self;

  self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node.Chain,
    &priority_to_insert,
    _Scheduler_SMP_Priority_less_equal
  );
}

static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Scheduler_Node                     *node,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  Scheduler_Node   *lowest_scheduled;
  ISR_lock_Context  lock_context;
  bool              success;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Scheduler_SMP_Node_state node_state;

    node_state = _Scheduler_SMP_Node_state( node );

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( node );

      if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
        _Thread_Scheduler_cancel_need_for_help(
          thread,
          _Thread_Get_CPU( thread )
        );
        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
        _Thread_Scheduler_release_critical( thread, &lock_context );

        _Scheduler_SMP_Preempt(
          context,
          node,
          lowest_scheduled,
          allocate_processor
        );

        ( *insert_scheduled )( context, node, insert_priority );
        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

        _Scheduler_Release_idle_thread(
          context,
          lowest_scheduled,
          _Scheduler_SMP_Release_idle_thread
        );
        success = true;
      } else {
        _Thread_Scheduler_release_critical( thread, &lock_context );
        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
        ( *insert_ready )( context, node, insert_priority );
        success = false;
      }
    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Thread_Scheduler_cancel_need_for_help(
        thread,
        _Thread_Get_CPU( thread )
      );
      _Scheduler_Discard_idle_thread(
        context,
        thread,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = true;
    } else {
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = false;
    }
  } else {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}

static inline void _Scheduler_SMP_Reconsider_help_request(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready
)
{
  ISR_lock_Context lock_context;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if (
    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
      && node->sticky_level == 1
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }

  _Thread_Scheduler_release_critical( thread, &lock_context );
}

static inline void _Scheduler_SMP_Withdraw_node(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Thread_Scheduler_state            next_state,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  ISR_lock_Context         lock_context;
  Scheduler_SMP_Node_state node_state;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Per_CPU_Control *thread_cpu;

    thread_cpu = _Thread_Get_CPU( thread );
    _Scheduler_Thread_change_state( thread, next_state );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    _Scheduler_SMP_Extract_from_scheduled( context, node );
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      thread_cpu,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    ( *extract_from_ready )( context, node );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    _Thread_Scheduler_release_critical( thread, &lock_context );
  }
}

static inline void _Scheduler_SMP_Do_start_idle(
  Scheduler_Context           *context,
  Thread_Control              *idle,
  Per_CPU_Control             *cpu,
  Scheduler_SMP_Register_idle  register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_SMP_Node    *node;

  self = _Scheduler_SMP_Get_self( context );
  node = _Scheduler_SMP_Thread_get_node( idle );

  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
  node->state = SCHEDULER_SMP_NODE_SCHEDULED;

  _Thread_Set_CPU( idle, cpu );
  ( *register_idle )( context, &node->Base, cpu );
  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
}

static inline void _Scheduler_SMP_Add_processor(
  Scheduler_Context           *context,
  Thread_Control              *idle,
  Scheduler_SMP_Has_ready      has_ready,
  Scheduler_SMP_Enqueue        enqueue_scheduled,
  Scheduler_SMP_Register_idle  register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_Node        *node;

  self = _Scheduler_SMP_Get_self( context );
  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
  node = _Thread_Scheduler_get_home_node( idle );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );

  if ( ( *has_ready )( &self->Base ) ) {
    Priority_Control insert_priority;

    insert_priority = _Scheduler_SMP_Node_priority( node );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    ( *enqueue_scheduled )( &self->Base, node, insert_priority );
  } else {
    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
  }
}

static inline Thread_Control *_Scheduler_SMP_Remove_processor(
  Scheduler_Context     *context,
  Per_CPU_Control       *cpu,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue
)
{
  Scheduler_SMP_Context *self;
  Chain_Node            *chain_node;
  Scheduler_Node        *victim_node;
  Thread_Control        *victim_user;
  Thread_Control        *victim_owner;
  Thread_Control        *idle;

  self = _Scheduler_SMP_Get_self( context );
  chain_node = _Chain_First( &self->Scheduled );

  do {
    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
    victim_node = (Scheduler_Node *) chain_node;
    victim_user = _Scheduler_Node_get_user( victim_node );
    chain_node = _Chain_Next( chain_node );
  } while ( _Thread_Get_CPU( victim_user ) != cpu );

  _Scheduler_SMP_Extract_from_scheduled( context, victim_node );
  victim_owner = _Scheduler_Node_get_owner( victim_node );

  if ( !victim_owner->is_idle ) {
    Scheduler_Node *idle_node;

    _Scheduler_Release_idle_thread(
      &self->Base,
      victim_node,
      _Scheduler_SMP_Release_idle_thread
    );
    idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
    idle_node = _Thread_Scheduler_get_home_node( idle );
    ( *extract_from_ready )( &self->Base, idle_node );
    _Scheduler_SMP_Preempt(
      &self->Base,
      idle_node,
      victim_node,
      _Scheduler_SMP_Allocate_processor_exact
    );

    if ( !_Chain_Is_empty( &self->Scheduled ) ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( victim_node );
      insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
      ( *enqueue )( context, victim_node, insert_priority );
    }
  } else {
    _Assert( victim_owner == victim_user );
    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
    idle = victim_owner;
    _Scheduler_SMP_Extract_idle_thread( idle );
  }

  return idle;
}

static inline void _Scheduler_SMP_Set_affinity(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_Node                  *node,
  void                            *arg,
  Scheduler_SMP_Set_affinity       set_affinity,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Get_highest_ready  get_highest_ready,
  Scheduler_SMP_Move               move_from_ready_to_scheduled,
  Scheduler_SMP_Enqueue            enqueue,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
      context,
      node,
      _Thread_Get_CPU( thread ),
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else {
    ( *set_affinity )( context, node, arg );
  }
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */