source: rtems/cpukit/include/rtems/score/schedulersmpimpl.h @ e97b7c9a

Last change on this file: e97b7c9a, checked in by Sebastian Huber
<sebastian.huber@…> on 04/11/19 at 11:47:50:

score: Use an ISR lock for Per_CPU_Control::Lock

The use of a hand-crafted lock for Per_CPU_Control::Lock was necessary
at some point in the SMP support development, but it is no longer
justified.
/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup RTEMSScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013, 2017 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>
#include <rtems/bspIo.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup RTEMSScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue(),
 * - _Scheduler_SMP_Enqueue_scheduled(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance starts
 * with an idle thread assigned to it.  Let's have a look at an example with two
 * idle threads I and J with priority 5.  We also have blocked threads A, B and
 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
 * with respect to the thread priority from left to right in the diagrams
 * below.  The highest priority node (lowest priority number) is the
 * leftmost node.  Since the processor assignment is independent of the thread
 * priority, the processor indices may move from one state to the other.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * migrated from processor 1 to processor 0 and thread C still executes on
 * processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */

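/*
 * A minimal stand-alone sketch of the three-state machine described above.
 * The state names mirror the SCHEDULER_SMP_NODE_* states; the helper
 * functions are hypothetical and only illustrate which transitions the
 * enqueue and block operations may cause:
 *
 * @code
 * #include <stdbool.h>
 * #include <stddef.h>
 *
 * typedef enum { EX_BLOCKED, EX_READY, EX_SCHEDULED } Example_state;
 *
 * // Enqueue a blocked node: it becomes scheduled if it wins a processor,
 * // otherwise it becomes ready ("enqueue" edges in the diagram above).
 * static Example_state example_enqueue( bool wins_processor )
 * {
 *   return wins_processor ? EX_SCHEDULED : EX_READY;
 * }
 *
 * // Block a scheduled node: it goes back to blocked and the highest ready
 * // node, if there is one, takes its place ("block" and "block other").
 * static Example_state example_block( Example_state *highest_ready )
 * {
 *   if ( highest_ready != NULL ) {
 *     *highest_ready = EX_SCHEDULED;
 *   }
 *
 *   return EX_BLOCKED;
 * }
 * @endcode
 */
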
typedef bool ( *Scheduler_SMP_Has_ready )(
  Scheduler_Context *context
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *node
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   insert_priority
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

typedef bool ( *Scheduler_SMP_Ask_for_help )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Scheduler_Node    *node
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

typedef void ( *Scheduler_SMP_Set_affinity )(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  void              *arg
);

typedef bool ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
);

typedef void ( *Scheduler_SMP_Register_idle )(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
);

static inline void _Scheduler_SMP_Do_nothing_register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
)
{
  (void) context;
  (void) idle;
  (void) cpu;
}

static inline bool _Scheduler_SMP_Priority_less_equal(
  const void       *to_insert,
  const Chain_Node *next
)
{
  const Priority_Control   *priority_to_insert;
  const Scheduler_SMP_Node *node_next;

  priority_to_insert = (const Priority_Control *) to_insert;
  node_next = (const Scheduler_SMP_Node *) next;

  return *priority_to_insert <= node_next->priority;
}

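/*
 * A stand-alone sketch of why the <= comparison above yields both prepend
 * and FIFO append behavior within a group of equal priorities.  It assumes,
 * based on the SCHEDULER_PRIORITY_PURIFY()/SCHEDULER_PRIORITY_APPEND()
 * usage later in this file, that stored node priorities are purified while
 * an insert priority may carry an append indicator in its least significant
 * bit; the names are hypothetical:
 *
 * @code
 * #include <stdint.h>
 * #include <stdio.h>
 *
 * #define EXAMPLE_APPEND_FLAG UINT64_C( 1 )
 *
 * int main( void )
 * {
 *   uint64_t stored = 10;                        // purified node priority
 *   uint64_t prepend = 10;                       // purified insert priority
 *   uint64_t append = 10 | EXAMPLE_APPEND_FLAG;  // append insert priority
 *
 *   // A node is inserted before the first node for which the order
 *   // function returns true.
 *   printf( "prepend before equal node: %d\n", prepend <= stored ); // 1
 *   printf( "append before equal node: %d\n", append <= stored );   // 0
 *   return 0;
 * }
 * @endcode
 */
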
static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
  _Chain_Initialize_empty( &self->Idle_threads );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->state;
}

static inline Priority_Control _Scheduler_SMP_Node_priority(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->priority;
}

static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node      *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_Node           *node,
  Scheduler_SMP_Node_state  new_state
)
{
  Scheduler_SMP_Node *the_node;

  the_node = _Scheduler_SMP_Node_downcast( node );
  the_node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->Scheduler.context == context;
}

static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
  Scheduler_Context *context
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *idle = (Thread_Control *)
    _Chain_Get_first_unprotected( &self->Idle_threads );

  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );

  return idle;
}

static inline void _Scheduler_SMP_Release_idle_thread(
  Scheduler_Context *context,
  Thread_Control    *idle
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
}

static inline void _Scheduler_SMP_Extract_idle_thread(
  Thread_Control *idle
)
{
  _Chain_Extract_unprotected( &idle->Object.Node );
}

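/*
 * The Idle_threads chain above is used like a LIFO pool: get takes the
 * first idle thread, release prepends one.  A stand-alone sketch of the
 * same discipline with a hypothetical intrusive singly linked list instead
 * of the RTEMS Chain API:
 *
 * @code
 * #include <assert.h>
 * #include <stddef.h>
 *
 * typedef struct Example_idle {
 *   struct Example_idle *next;
 * } Example_idle;
 *
 * // Get: take the first idle thread; the pool must not be empty.
 * static Example_idle *example_get_idle( Example_idle **pool )
 * {
 *   Example_idle *idle = *pool;
 *
 *   assert( idle != NULL );
 *   *pool = idle->next;
 *   return idle;
 * }
 *
 * // Release: prepend the idle thread, making it the next one handed out.
 * static void example_release_idle( Example_idle **pool, Example_idle *idle )
 * {
 *   idle->next = *pool;
 *   *pool = idle;
 * }
 * @endcode
 */
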
static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      heir = scheduled_thread;
    }
  } else {
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}

/*
 * This method is slightly different from
 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
 * but does not take into account affinity.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;
  (void) victim;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *allocate_processor )( context, scheduled, victim, victim_cpu );
}

static inline Thread_Control *_Scheduler_SMP_Preempt(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control   *victim_thread;
  ISR_lock_Context  scheduler_lock_context;
  Per_CPU_Control  *victim_cpu;

  victim_thread = _Scheduler_Node_get_user( victim );
  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  _Thread_Scheduler_acquire_critical( victim_thread, &scheduler_lock_context );

  victim_cpu = _Thread_Get_CPU( victim_thread );

  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );

    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
      ISR_lock_Context per_cpu_lock_context;

      _Per_CPU_Acquire( victim_cpu, &per_cpu_lock_context );
      _Chain_Append_unprotected(
        &victim_cpu->Threads_in_need_for_help,
        &victim_thread->Scheduler.Help_node
      );
      _Per_CPU_Release( victim_cpu, &per_cpu_lock_context );
    }
  }

  _Thread_Scheduler_release_critical( victim_thread, &scheduler_lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    victim,
    victim_cpu,
    allocate_processor
  );

  return victim_thread;
}

static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Chain_Control *scheduled = &self->Scheduled;
  Scheduler_Node *lowest_scheduled =
    (Scheduler_Node *) _Chain_Last( scheduled );

  (void) filter;

  _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
  _Assert(
    _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
  );

  return lowest_scheduled;
}

static inline void _Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Priority_Control                  priority,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  action = _Scheduler_Try_to_schedule_node(
    context,
    node,
    _Scheduler_Node_get_idle( lowest_scheduled ),
    _Scheduler_SMP_Get_idle_thread
  );

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      allocate_processor
    );

    ( *insert_scheduled )( context, node, priority );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Release_idle_thread(
      context,
      lowest_scheduled,
      _Scheduler_SMP_Release_idle_thread
    );
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    _Scheduler_SMP_Node_change_state(
      lowest_scheduled,
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );

    ( *insert_scheduled )( context, node, priority );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Exchange_idle_thread(
      node,
      lowest_scheduled,
      _Scheduler_Node_get_idle( lowest_scheduled )
    );
  } else {
    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
  }
}

/**
 * @brief Enqueues a node according to the specified order function.
 *
 * The node must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] insert_priority The node insert priority.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param[in] get_lowest_scheduled Function to select the node from the
 *   scheduled nodes to replace.  It may not be possible to find one; in this
 *   case a pointer must be returned so that the order function returns false
 *   if this pointer is passed as the second argument to the order function.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline bool _Scheduler_SMP_Enqueue(
  Scheduler_Context                  *context,
  Scheduler_Node                     *node,
  Priority_Control                    insert_priority,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  bool            needs_help;
  Scheduler_Node *lowest_scheduled;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      insert_priority,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      allocate_processor
    );
    needs_help = false;
  } else {
    ( *insert_ready )( context, node, insert_priority );
    needs_help = true;
  }

  return needs_help;
}

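/*
 * A stand-alone model of the decision _Scheduler_SMP_Enqueue() makes; the
 * Example_* names are hypothetical and lower priority values mean higher
 * importance, as elsewhere in this file:
 *
 * @code
 * #include <stdbool.h>
 * #include <stdint.h>
 *
 * typedef struct {
 *   uint64_t priority; // purified priority of the lowest scheduled node
 * } Example_node;
 *
 * // Mirrors the bool result of _Scheduler_SMP_Enqueue(): true means the
 * // node joined the ready set only and the thread may still need help.
 * static bool example_enqueue(
 *   uint64_t            insert_priority,
 *   const Example_node *lowest_scheduled
 * )
 * {
 *   if ( insert_priority <= lowest_scheduled->priority ) {
 *     // Preempt: the lowest scheduled node moves to the ready set and
 *     // the enqueued node takes over its processor.
 *     return false;
 *   }
 *
 *   // The enqueued node joins the ready set.
 *   return true;
 * }
 * @endcode
 */
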
/**
 * @brief Enqueues a scheduled node according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] insert_priority The node insert priority.
 * @param[in] order The order function.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline bool _Scheduler_SMP_Enqueue_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *const node,
  Priority_Control                  insert_priority,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  while ( true ) {
    Scheduler_Node                   *highest_ready;
    Scheduler_Try_to_schedule_action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if (
      node->sticky_level > 0
        && ( *order )( &insert_priority, &highest_ready->Node.Chain )
    ) {
      ( *insert_scheduled )( context, node, insert_priority );

      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
        Thread_Control   *owner;
        ISR_lock_Context  lock_context;

        owner = _Scheduler_Node_get_owner( node );
        _Thread_Scheduler_acquire_critical( owner, &lock_context );

        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
          _Thread_Scheduler_cancel_need_for_help(
            owner,
            _Thread_Get_CPU( owner )
          );
          _Scheduler_Discard_idle_thread(
            context,
            owner,
            node,
            _Scheduler_SMP_Release_idle_thread
          );
          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
        }

        _Thread_Scheduler_release_critical( owner, &lock_context );
      }

      return false;
    }

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      _Scheduler_Node_get_idle( node ),
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      Thread_Control *idle;

      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        allocate_processor
      );

      ( *insert_ready )( context, node, insert_priority );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      idle = _Scheduler_Release_idle_thread(
        context,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      return ( idle == NULL );
    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_SCHEDULED
      );

      ( *insert_ready )( context, node, insert_priority );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      _Scheduler_Exchange_idle_thread(
        highest_ready,
        node,
        _Scheduler_Node_get_idle( node )
      );
      return false;
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }
}

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  (void) context;
  _Chain_Extract_unprotected( &node->Node.Chain );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        victim,
        victim_cpu,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}

static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        victim,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] node The scheduler node of the thread to block.
 * @param[in] extract_from_scheduled Function to extract a node from the set of
 *   scheduled nodes.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Node_state  node_state;
  Per_CPU_Control          *thread_cpu;

  node_state = _Scheduler_SMP_Node_state( node );

  thread_cpu = _Scheduler_Block_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Get_idle_thread
  );

  if ( thread_cpu != NULL ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      ( *extract_from_scheduled )( context, node );
      _Scheduler_SMP_Schedule_highest_ready(
        context,
        node,
        thread_cpu,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor
      );
    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
      ( *extract_from_ready )( context, node );
    }
  }
}

static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Update   update,
  Scheduler_SMP_Enqueue  enqueue
)
{
  Scheduler_SMP_Node_state  node_state;
  bool                      unblock;

  node_state = _Scheduler_SMP_Node_state( node );
  unblock = _Scheduler_Unblock_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Release_idle_thread
  );

  if ( unblock ) {
    Priority_Control priority;
    bool             needs_help;

    priority = _Scheduler_Node_get_priority( node );
    priority = SCHEDULER_PRIORITY_PURIFY( priority );

    if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
      ( *update )( context, node, priority );
    }

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
      needs_help = ( *enqueue )( context, node, insert_priority );
    } else {
      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
      _Assert( node->sticky_level > 0 );
      _Assert( node->idle == NULL );
      needs_help = true;
    }

    if ( needs_help ) {
      _Scheduler_Ask_for_help( thread );
    }
  }
}

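/*
 * A sketch of how a concrete scheduler might instantiate the unblock
 * operation above; _Scheduler_EXAMPLE_Do_update and
 * _Scheduler_EXAMPLE_Enqueue are hypothetical functions matching the
 * Scheduler_SMP_Update and Scheduler_SMP_Enqueue types:
 *
 * @code
 * static void _Scheduler_EXAMPLE_Unblock(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *thread,
 *   Scheduler_Node          *node
 * )
 * {
 *   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *   _Scheduler_SMP_Unblock(
 *     context,
 *     thread,
 *     node,
 *     _Scheduler_EXAMPLE_Do_update,
 *     _Scheduler_EXAMPLE_Enqueue
 *   );
 * }
 * @endcode
 */
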
static inline void _Scheduler_SMP_Update_priority(
  Scheduler_Context          *context,
  Thread_Control             *thread,
  Scheduler_Node             *node,
  Scheduler_SMP_Extract       extract_from_ready,
  Scheduler_SMP_Update        update,
  Scheduler_SMP_Enqueue       enqueue,
  Scheduler_SMP_Enqueue       enqueue_scheduled,
  Scheduler_SMP_Ask_for_help  ask_for_help
)
{
  Priority_Control         priority;
  Priority_Control         insert_priority;
  Scheduler_SMP_Node_state node_state;

  insert_priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );

  if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }

    return;
  }

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    ( *update )( context, node, priority );
    ( *enqueue_scheduled )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *update )( context, node, priority );
    ( *enqueue )( context, node, insert_priority );
  } else {
    ( *update )( context, node, priority );

    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }
  }
}

static inline void _Scheduler_SMP_Yield(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue,
  Scheduler_SMP_Enqueue  enqueue_scheduled
)
{
  bool                     needs_help;
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    ( *enqueue_scheduled )( context, node, insert_priority );
    needs_help = false;
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );

    needs_help = ( *enqueue )( context, node, insert_priority );
  } else {
    needs_help = true;
  }

  if ( needs_help ) {
    _Scheduler_Ask_for_help( thread );
  }
}

static inline void _Scheduler_SMP_Insert_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   priority_to_insert
)
{
  Scheduler_SMP_Context *self;

  self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node.Chain,
    &priority_to_insert,
    _Scheduler_SMP_Priority_less_equal
  );
}

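/*
 * _Scheduler_SMP_Insert_scheduled() relies on the ordered insert contract
 * assumed throughout this file: the node is placed before the first element
 * for which the order function returns true.  A stand-alone model of that
 * contract with hypothetical names and an array instead of a chain:
 *
 * @code
 * #include <stdbool.h>
 * #include <stddef.h>
 * #include <stdint.h>
 *
 * typedef bool ( *Example_order )( const void *key, uint64_t element );
 *
 * static size_t example_find_insert_index(
 *   const uint64_t *elements,
 *   size_t          count,
 *   const void     *key,
 *   Example_order   order
 * )
 * {
 *   size_t i;
 *
 *   for ( i = 0; i < count; ++i ) {
 *     if ( ( *order )( key, elements[ i ] ) ) {
 *       break; // insert before this element
 *     }
 *   }
 *
 *   return i; // count means append at the end
 * }
 * @endcode
 */
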
static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Scheduler_Node                     *node,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  Scheduler_Node   *lowest_scheduled;
  ISR_lock_Context  lock_context;
  bool              success;

  if ( thread->Scheduler.pinned_scheduler != NULL ) {
    /*
     * Pinned threads are not allowed to ask for help.  Return success to break
     * the loop in _Thread_Ask_for_help() early.
     */
    return true;
  }

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Scheduler_SMP_Node_state node_state;

    node_state = _Scheduler_SMP_Node_state( node );

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( node );

      if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
        _Thread_Scheduler_cancel_need_for_help(
          thread,
          _Thread_Get_CPU( thread )
        );
        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
        _Thread_Scheduler_release_critical( thread, &lock_context );

        _Scheduler_SMP_Preempt(
          context,
          node,
          lowest_scheduled,
          allocate_processor
        );

        ( *insert_scheduled )( context, node, insert_priority );
        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

        _Scheduler_Release_idle_thread(
          context,
          lowest_scheduled,
          _Scheduler_SMP_Release_idle_thread
        );
        success = true;
      } else {
        _Thread_Scheduler_release_critical( thread, &lock_context );
        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
        ( *insert_ready )( context, node, insert_priority );
        success = false;
      }
    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Thread_Scheduler_cancel_need_for_help(
        thread,
        _Thread_Get_CPU( thread )
      );
      _Scheduler_Discard_idle_thread(
        context,
        thread,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = true;
    } else {
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = false;
    }
  } else {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}

static inline void _Scheduler_SMP_Reconsider_help_request(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready
)
{
  ISR_lock_Context lock_context;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if (
    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
      && node->sticky_level == 1
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }

  _Thread_Scheduler_release_critical( thread, &lock_context );
}

static inline void _Scheduler_SMP_Withdraw_node(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Thread_Scheduler_state            next_state,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  ISR_lock_Context         lock_context;
  Scheduler_SMP_Node_state node_state;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Per_CPU_Control *thread_cpu;

    thread_cpu = _Thread_Get_CPU( thread );
    _Scheduler_Thread_change_state( thread, next_state );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    _Scheduler_SMP_Extract_from_scheduled( context, node );
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      thread_cpu,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    ( *extract_from_ready )( context, node );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    _Thread_Scheduler_release_critical( thread, &lock_context );
  }
}

static inline void _Scheduler_SMP_Do_start_idle(
  Scheduler_Context           *context,
  Thread_Control              *idle,
  Per_CPU_Control             *cpu,
  Scheduler_SMP_Register_idle  register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_SMP_Node    *node;

  self = _Scheduler_SMP_Get_self( context );
  node = _Scheduler_SMP_Thread_get_node( idle );

  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
  node->state = SCHEDULER_SMP_NODE_SCHEDULED;

  _Thread_Set_CPU( idle, cpu );
  ( *register_idle )( context, &node->Base, cpu );
  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
}

static inline void _Scheduler_SMP_Add_processor(
  Scheduler_Context           *context,
  Thread_Control              *idle,
  Scheduler_SMP_Has_ready      has_ready,
  Scheduler_SMP_Enqueue        enqueue_scheduled,
  Scheduler_SMP_Register_idle  register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_Node        *node;

  self = _Scheduler_SMP_Get_self( context );
  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
  node = _Thread_Scheduler_get_home_node( idle );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );

  if ( ( *has_ready )( &self->Base ) ) {
    Priority_Control insert_priority;

    insert_priority = _Scheduler_SMP_Node_priority( node );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    ( *enqueue_scheduled )( &self->Base, node, insert_priority );
  } else {
    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
  }
}

static inline Thread_Control *_Scheduler_SMP_Remove_processor(
  Scheduler_Context     *context,
  Per_CPU_Control       *cpu,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue
)
{
  Scheduler_SMP_Context *self;
  Chain_Node            *chain_node;
  Scheduler_Node        *victim_node;
  Thread_Control        *victim_user;
  Thread_Control        *victim_owner;
  Thread_Control        *idle;

  self = _Scheduler_SMP_Get_self( context );
  chain_node = _Chain_First( &self->Scheduled );

  do {
    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
    victim_node = (Scheduler_Node *) chain_node;
    victim_user = _Scheduler_Node_get_user( victim_node );
    chain_node = _Chain_Next( chain_node );
  } while ( _Thread_Get_CPU( victim_user ) != cpu );

  _Scheduler_SMP_Extract_from_scheduled( context, victim_node );
  victim_owner = _Scheduler_Node_get_owner( victim_node );

  if ( !victim_owner->is_idle ) {
    Scheduler_Node *idle_node;

    _Scheduler_Release_idle_thread(
      &self->Base,
      victim_node,
      _Scheduler_SMP_Release_idle_thread
    );
    idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
    idle_node = _Thread_Scheduler_get_home_node( idle );
    ( *extract_from_ready )( &self->Base, idle_node );
    _Scheduler_SMP_Preempt(
      &self->Base,
      idle_node,
      victim_node,
      _Scheduler_SMP_Allocate_processor_exact
    );

    if ( !_Chain_Is_empty( &self->Scheduled ) ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( victim_node );
      insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
      ( *enqueue )( context, victim_node, insert_priority );
    }
  } else {
    _Assert( victim_owner == victim_user );
    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
    idle = victim_owner;
    _Scheduler_SMP_Extract_idle_thread( idle );
  }

  return idle;
}

static inline void _Scheduler_SMP_Set_affinity(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_Node                  *node,
  void                            *arg,
  Scheduler_SMP_Set_affinity       set_affinity,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Get_highest_ready  get_highest_ready,
  Scheduler_SMP_Move               move_from_ready_to_scheduled,
  Scheduler_SMP_Enqueue            enqueue,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
      context,
      node,
      _Thread_Get_CPU( thread ),
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    ( *set_affinity )( context, node, arg );
  }
}

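/*
 * The operations in this file are templates: a concrete SMP scheduler
 * instantiates them with its own ready set functions.  A sketch of such a
 * wiring for the block operation; the _Scheduler_EXAMPLE_* helpers are
 * hypothetical, while the two _Scheduler_SMP_* arguments are the generic
 * implementations from this file:
 *
 * @code
 * static void _Scheduler_EXAMPLE_Block(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *thread,
 *   Scheduler_Node          *node
 * )
 * {
 *   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *   _Scheduler_SMP_Block(
 *     context,
 *     thread,
 *     node,
 *     _Scheduler_SMP_Extract_from_scheduled,
 *     _Scheduler_EXAMPLE_Extract_from_ready,
 *     _Scheduler_EXAMPLE_Get_highest_ready,
 *     _Scheduler_EXAMPLE_Move_from_ready_to_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy
 *   );
 * }
 * @endcode
 */
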
/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */