source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ 9c238e1

Last change on this file was 9c238e1, checked in by Sebastian Huber <sebastian.huber@…> on 10/21/16 at 12:33:01

score: Simplify update priority scheduler op

Remove unused return status.

[9d83f58a]1/**
2 * @file
3 *
4 * @brief SMP Scheduler Implementation
5 *
6 * @ingroup ScoreSchedulerSMP
7 */
8
9/*
[351c14d]10 * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
[9d83f58a]11 *
12 *  embedded brains GmbH
13 *  Dornierstr. 4
14 *  82178 Puchheim
15 *  Germany
16 *  <rtems@embedded-brains.de>
17 *
18 * The license and distribution terms for this file may be
19 * found in the file LICENSE in this distribution or at
[c499856]20 * http://www.rtems.org/license/LICENSE.
[9d83f58a]21 */
22
23#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25
26#include <rtems/score/schedulersmp.h>
[38b59a6]27#include <rtems/score/assert.h>
[48c4a55]28#include <rtems/score/chainimpl.h>
[38b59a6]29#include <rtems/score/schedulersimpleimpl.h>
[351c14d]30#include <rtems/bspIo.h>
[9d83f58a]31
32#ifdef __cplusplus
33extern "C" {
34#endif /* __cplusplus */
35
36/**
37 * @addtogroup ScoreSchedulerSMP
38 *
[c6522a65]39 * The scheduler nodes can be in three states
40 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
[f39f667a]41 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
42 * - @ref SCHEDULER_SMP_NODE_READY.
[c6522a65]43 *
[f39f667a]44 * State transitions are triggered via basic operations
[c0bff5e]45 * - _Scheduler_SMP_Enqueue_ordered(),
46 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
[f39f667a]47 * - _Scheduler_SMP_Block().
[c6522a65]48 *
49 * @dot
50 * digraph {
51 *   node [style="filled"];
52 *
53 *   bs [label="BLOCKED"];
54 *   ss [label="SCHEDULED", fillcolor="green"];
55 *   rs [label="READY", fillcolor="red"];
56 *
57 *   edge [label="enqueue"];
58 *   edge [fontcolor="darkgreen", color="darkgreen"];
59 *
60 *   bs -> ss;
61 *
62 *   edge [fontcolor="red", color="red"];
63 *
64 *   bs -> rs;
65 *
66 *   edge [label="enqueue other"];
67 *
68 *   ss -> rs;
69 *
[f39f667a]70 *   edge [label="block"];
[c6522a65]71 *   edge [fontcolor="black", color="black"];
72 *
[b532bb2c]73 *   ss -> bs;
[c6522a65]74 *   rs -> bs;
75 *
[f39f667a]76 *   edge [label="block other"];
[c6522a65]77 *   edge [fontcolor="darkgreen", color="darkgreen"];
78 *
79 *   rs -> ss;
80 * }
81 * @enddot
82 *
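 * The three states above correspond to the Scheduler_SMP_Node_state
 * enumeration declared in <rtems/score/schedulersmp.h>; a minimal sketch of
 * its shape, shown here only for orientation:
 *
 * @code
 * typedef enum {
 *   SCHEDULER_SMP_NODE_BLOCKED,
 *   SCHEDULER_SMP_NODE_SCHEDULED,
 *   SCHEDULER_SMP_NODE_READY
 * } Scheduler_SMP_Node_state;
 * @endcode
 *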
83 * During system initialization, each processor of the scheduler instance starts
84 * with an idle thread assigned to it.  Let's have a look at an example with two
85 * idle threads I and J with priority 5.  We also have blocked threads A, B and
[2d96533]86 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
87 * with respect to the thread priority from left to right in the diagrams
88 * below.  The highest priority node (lowest priority number) is the
89 * leftmost node.  Since the processor assignment is independent of the thread
90 * priority, the processor indices may move from one state to the next.
[c6522a65]91 *
92 * @dot
93 * digraph {
94 *   node [style="filled"];
95 *   edge [dir="none"];
96 *   subgraph {
97 *     rank = same;
98 *
99 *     i [label="I (5)", fillcolor="green"];
100 *     j [label="J (5)", fillcolor="green"];
101 *     a [label="A (1)"];
102 *     b [label="B (2)"];
103 *     c [label="C (3)"];
104 *     i -> j;
105 *   }
106 *
107 *   subgraph {
108 *     rank = same;
109 *
110 *     p0 [label="PROCESSOR 0", shape="box"];
111 *     p1 [label="PROCESSOR 1", shape="box"];
112 *   }
113 *
114 *   i -> p0;
115 *   j -> p1;
116 * }
117 * @enddot
118 *
119 * Let's start A.  For this, an enqueue operation is performed.
120 *
121 * @dot
122 * digraph {
123 *   node [style="filled"];
124 *   edge [dir="none"];
125 *
126 *   subgraph {
127 *     rank = same;
128 *
129 *     i [label="I (5)", fillcolor="green"];
130 *     j [label="J (5)", fillcolor="red"];
131 *     a [label="A (1)", fillcolor="green"];
132 *     b [label="B (2)"];
133 *     c [label="C (3)"];
134 *     a -> i;
135 *   }
136 *
137 *   subgraph {
138 *     rank = same;
139 *
140 *     p0 [label="PROCESSOR 0", shape="box"];
141 *     p1 [label="PROCESSOR 1", shape="box"];
142 *   }
143 *
144 *   i -> p0;
145 *   a -> p1;
146 * }
147 * @enddot
148 *
149 * Let's start C.
150 *
151 * @dot
152 * digraph {
153 *   node [style="filled"];
154 *   edge [dir="none"];
155 *
156 *   subgraph {
157 *     rank = same;
158 *
159 *     a [label="A (1)", fillcolor="green"];
160 *     c [label="C (3)", fillcolor="green"];
161 *     i [label="I (5)", fillcolor="red"];
162 *     j [label="J (5)", fillcolor="red"];
163 *     b [label="B (2)"];
164 *     a -> c;
165 *     i -> j;
166 *   }
167 *
168 *   subgraph {
169 *     rank = same;
170 *
171 *     p0 [label="PROCESSOR 0", shape="box"];
172 *     p1 [label="PROCESSOR 1", shape="box"];
173 *   }
174 *
175 *   c -> p0;
176 *   a -> p1;
177 * }
178 * @enddot
179 *
180 * Let's start B.
181 *
182 * @dot
183 * digraph {
184 *   node [style="filled"];
185 *   edge [dir="none"];
186 *
187 *   subgraph {
188 *     rank = same;
189 *
190 *     a [label="A (1)", fillcolor="green"];
191 *     b [label="B (2)", fillcolor="green"];
192 *     c [label="C (3)", fillcolor="red"];
193 *     i [label="I (5)", fillcolor="red"];
194 *     j [label="J (5)", fillcolor="red"];
195 *     a -> b;
196 *     c -> i -> j;
197 *   }
198 *
199 *   subgraph {
200 *     rank = same;
201 *
202 *     p0 [label="PROCESSOR 0", shape="box"];
203 *     p1 [label="PROCESSOR 1", shape="box"];
204 *   }
205 *
206 *   b -> p0;
207 *   a -> p1;
208 * }
209 * @enddot
210 *
[f39f667a]211 * Let's change the priority of thread A to 4.
[c6522a65]212 *
213 * @dot
214 * digraph {
215 *   node [style="filled"];
216 *   edge [dir="none"];
217 *
218 *   subgraph {
219 *     rank = same;
220 *
221 *     b [label="B (2)", fillcolor="green"];
222 *     c [label="C (3)", fillcolor="green"];
223 *     a [label="A (4)", fillcolor="red"];
224 *     i [label="I (5)", fillcolor="red"];
225 *     j [label="J (5)", fillcolor="red"];
226 *     b -> c;
227 *     a -> i -> j;
228 *   }
229 *
230 *   subgraph {
231 *     rank = same;
232 *
233 *     p0 [label="PROCESSOR 0", shape="box"];
234 *     p1 [label="PROCESSOR 1", shape="box"];
235 *   }
236 *
237 *   b -> p0;
238 *   c -> p1;
239 * }
240 * @enddot
241 *
[f39f667a]242 * Now perform a blocking operation with thread B.  Please note that thread A
243 * has now migrated from processor 1 to processor 0 and thread C still executes
244 * on processor 1.
[c6522a65]245 *
246 * @dot
247 * digraph {
248 *   node [style="filled"];
249 *   edge [dir="none"];
250 *
251 *   subgraph {
252 *     rank = same;
253 *
254 *     c [label="C (3)", fillcolor="green"];
[f39f667a]255 *     a [label="A (4)", fillcolor="green"];
[c6522a65]256 *     i [label="I (5)", fillcolor="red"];
257 *     j [label="J (5)", fillcolor="red"];
[f39f667a]258 *     b [label="B (2)"];
259 *     c -> a;
[c6522a65]260 *     i -> j;
261 *   }
262 *
263 *   subgraph {
264 *     rank = same;
265 *
266 *     p0 [label="PROCESSOR 0", shape="box"];
267 *     p1 [label="PROCESSOR 1", shape="box"];
268 *   }
269 *
[f39f667a]270 *   a -> p0;
[c6522a65]271 *   c -> p1;
272 * }
273 * @enddot
274 *
[9d83f58a]275 * @{
276 */
277
[8f0c7a46]278typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
[238629f]279  Scheduler_Context *context,
[8f0c7a46]280  Scheduler_Node    *node
[238629f]281);
282
[8f0c7a46]283typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
[238629f]284  Scheduler_Context *context,
[8f0c7a46]285  Scheduler_Node    *filter,
[238629f]286  Chain_Node_order   order
[48c4a55]287);
288
289typedef void ( *Scheduler_SMP_Extract )(
[3730a07f]290  Scheduler_Context *context,
[8f0c7a46]291  Scheduler_Node    *node_to_extract
[48c4a55]292);
293
294typedef void ( *Scheduler_SMP_Insert )(
[3730a07f]295  Scheduler_Context *context,
[8f0c7a46]296  Scheduler_Node    *node_to_insert
[48c4a55]297);
298
299typedef void ( *Scheduler_SMP_Move )(
[3730a07f]300  Scheduler_Context *context,
[8f0c7a46]301  Scheduler_Node    *node_to_move
[48c4a55]302);
303
[351c14d]304typedef bool ( *Scheduler_SMP_Ask_for_help )(
305  Scheduler_Context *context,
306  Thread_Control    *thread,
307  Scheduler_Node    *node
308);
309
[f39f667a]310typedef void ( *Scheduler_SMP_Update )(
311  Scheduler_Context *context,
[8f0c7a46]312  Scheduler_Node    *node_to_update,
[d9b54da]313  Priority_Control   new_priority
[f39f667a]314);
315
[8568341]316typedef Thread_Control *( *Scheduler_SMP_Enqueue )(
317  Scheduler_Context *context,
318  Scheduler_Node    *node_to_enqueue,
319  Thread_Control    *needs_help
320);
321
322typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
[f39f667a]323  Scheduler_Context *context,
[8f0c7a46]324  Scheduler_Node    *node_to_enqueue
[f39f667a]325);
326
[238629f]327typedef void ( *Scheduler_SMP_Allocate_processor )(
[8f0c7a46]328  Scheduler_Context *context,
[edb020c]329  Thread_Control    *scheduled_thread,
330  Thread_Control    *victim_thread,
331  Per_CPU_Control   *victim_cpu
[238629f]332);
333
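/*
 * The function pointer types above are the customization points of this
 * template: a concrete SMP scheduler supplies functions with these signatures
 * for its ready set (for example a simple chain or a red-black tree), and the
 * generic operations below glue them together.
 *
 * The two order predicates that follow differ only in the comparison
 * operator: the LIFO variant uses "<=" so that a newly inserted node is
 * placed before nodes of equal priority, while the FIFO variant uses "<" so
 * that it is placed after them.
 */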
[8f0c7a46]334static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
335  const Chain_Node *to_insert,
336  const Chain_Node *next
337)
338{
339  const Scheduler_SMP_Node *node_to_insert =
340    (const Scheduler_SMP_Node *) to_insert;
341  const Scheduler_SMP_Node *node_next =
342    (const Scheduler_SMP_Node *) next;
343
344  return node_to_insert->priority <= node_next->priority;
345}
346
347static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
348  const Chain_Node *to_insert,
349  const Chain_Node *next
350)
351{
352  const Scheduler_SMP_Node *node_to_insert =
353    (const Scheduler_SMP_Node *) to_insert;
354  const Scheduler_SMP_Node *node_next =
355    (const Scheduler_SMP_Node *) next;
356
357  return node_to_insert->priority < node_next->priority;
358}
359
[3730a07f]360static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
361  Scheduler_Context *context
362)
363{
364  return (Scheduler_SMP_Context *) context;
365}
366
[494c2e3]367static inline void _Scheduler_SMP_Initialize(
[e1598a6]368  Scheduler_SMP_Context *self
[494c2e3]369)
[9d83f58a]370{
[494c2e3]371  _Chain_Initialize_empty( &self->Scheduled );
[5c3d250]372  _Chain_Initialize_empty( &self->Idle_threads );
[9d83f58a]373}
374
[08d9760]375static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
[beab7329]376  Thread_Control *thread
377)
378{
[08d9760]379  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
[beab7329]380}
381
[5c3d250]382static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
383  Thread_Control *thread
384)
385{
[300f6a48]386  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_own_node( thread );
[5c3d250]387}
388
[8f0c7a46]389static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
390  Scheduler_Node *node
391)
392{
393  return (Scheduler_SMP_Node *) node;
394}
395
[501043a]396static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
397  const Scheduler_Node *node
398)
399{
400  return ( (const Scheduler_SMP_Node *) node )->state;
401}
402
403static inline Priority_Control _Scheduler_SMP_Node_priority(
404  const Scheduler_Node *node
405)
406{
407  return ( (const Scheduler_SMP_Node *) node )->priority;
408}
409
[beab7329]410static inline void _Scheduler_SMP_Node_initialize(
[300f6a48]411  const Scheduler_Control *scheduler,
412  Scheduler_SMP_Node      *node,
413  Thread_Control          *thread,
414  Priority_Control         priority
[beab7329]415)
416{
[300f6a48]417  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
[beab7329]418  node->state = SCHEDULER_SMP_NODE_BLOCKED;
[9bfad8c]419  node->priority = priority;
[beab7329]420}
421
[8f0c7a46]422static inline void _Scheduler_SMP_Node_update_priority(
423  Scheduler_SMP_Node *node,
424  Priority_Control    new_priority
425)
426{
427  node->priority = new_priority;
428}
429
[f39f667a]430extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];
[beab7329]431
432static inline void _Scheduler_SMP_Node_change_state(
[3a72411]433  Scheduler_Node           *node,
434  Scheduler_SMP_Node_state  new_state
[beab7329]435)
436{
[3a72411]437  Scheduler_SMP_Node *the_node;
438
439  the_node = _Scheduler_SMP_Node_downcast( node );
[beab7329]440  _Assert(
[3a72411]441    _Scheduler_SMP_Node_valid_state_changes[ the_node->state ][ new_state ]
[beab7329]442  );
443
[3a72411]444  the_node->state = new_state;
[beab7329]445}
446
[38b59a6]447static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
[8f0c7a46]448  const Scheduler_Context *context,
449  const Per_CPU_Control   *cpu
[38b59a6]450)
451{
[8f0c7a46]452  return cpu->scheduler_context == context;
[38b59a6]453}
454
[5c3d250]455static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
[27783f6]456  Scheduler_Context *context
[5c3d250]457)
458{
459  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
460  Thread_Control *idle = (Thread_Control *)
461    _Chain_Get_first_unprotected( &self->Idle_threads );
462
463  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
464
465  return idle;
466}
467
468static inline void _Scheduler_SMP_Release_idle_thread(
[27783f6]469  Scheduler_Context *context,
470  Thread_Control    *idle
[5c3d250]471)
472{
473  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
474
475  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
476}
477
[19e41767]478static inline void _Scheduler_SMP_Allocate_processor_lazy(
[8f0c7a46]479  Scheduler_Context *context,
[19e41767]480  Thread_Control    *scheduled_thread,
[edb020c]481  Thread_Control    *victim_thread,
482  Per_CPU_Control   *victim_cpu
[fc2ad63]483)
484{
[8f0c7a46]485  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
[38b59a6]486  Per_CPU_Control *cpu_self = _Per_CPU_Get();
[fc2ad63]487  Thread_Control *heir;
488
[38b59a6]489  _Assert( _ISR_Get_level() != 0 );
[fc2ad63]490
[8f0c7a46]491  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
492    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
493      heir = scheduled_cpu->heir;
[835b88b]494      _Thread_Dispatch_update_heir(
[8f0c7a46]495        cpu_self,
496        scheduled_cpu,
497        scheduled_thread
498      );
[38b59a6]499    } else {
500      /* We have to force a migration to our processor set */
[8f0c7a46]501      heir = scheduled_thread;
[38b59a6]502    }
[fc2ad63]503  } else {
[8f0c7a46]504    heir = scheduled_thread;
[fc2ad63]505  }
506
[8f0c7a46]507  if ( heir != victim_thread ) {
508    _Thread_Set_CPU( heir, victim_cpu );
[835b88b]509    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
[fc2ad63]510  }
511}
512
[09c87fb]513/*
514 * This method is slightly different from
515 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
516 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
517 * but does not take into account affinity.
518 */
519static inline void _Scheduler_SMP_Allocate_processor_exact(
520  Scheduler_Context *context,
521  Thread_Control    *scheduled_thread,
[edb020c]522  Thread_Control    *victim_thread,
523  Per_CPU_Control   *victim_cpu
[09c87fb]524)
525{
526  Per_CPU_Control *cpu_self = _Per_CPU_Get();
527
528  (void) context;
529
530  _Thread_Set_CPU( scheduled_thread, victim_cpu );
531  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
532}
533
[19e41767]534static inline void _Scheduler_SMP_Allocate_processor(
535  Scheduler_Context                *context,
536  Scheduler_Node                   *scheduled,
[a7a8ec03]537  Thread_Control                   *victim_thread,
[edb020c]538  Per_CPU_Control                  *victim_cpu,
[19e41767]539  Scheduler_SMP_Allocate_processor  allocate_processor
540)
541{
[ac532f3]542  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
[19e41767]543
[3a72411]544  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
[19e41767]545
[edb020c]546  ( *allocate_processor )(
547    context,
548    scheduled_thread,
549    victim_thread,
550    victim_cpu
551  );
[19e41767]552}
553
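/**
 * @brief Preempts the victim's user thread in favour of the scheduled node.
 *
 * The victim node is moved to the ready state and, if its thread was
 * scheduled, the thread is appended to the per-CPU chain of threads in need
 * for help.  Finally the victim's processor is allocated to the scheduled
 * node.
 *
 * @return The victim thread.
 */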
[a7a8ec03]554static inline Thread_Control *_Scheduler_SMP_Preempt(
555  Scheduler_Context                *context,
556  Scheduler_Node                   *scheduled,
557  Scheduler_Node                   *victim,
558  Scheduler_SMP_Allocate_processor  allocate_processor
559)
560{
561  Thread_Control   *victim_thread;
562  ISR_lock_Context  lock_context;
[edb020c]563  Per_CPU_Control  *victim_cpu;
[a7a8ec03]564
565  victim_thread = _Scheduler_Node_get_user( victim );
566  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
567
568  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
[351c14d]569
[edb020c]570  victim_cpu = _Thread_Get_CPU( victim_thread );
[351c14d]571
572  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
573    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
574
575    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
576      _Per_CPU_Acquire( victim_cpu );
577      _Chain_Append_unprotected(
578        &victim_cpu->Threads_in_need_for_help,
579        &victim_thread->Scheduler.Help_node
580      );
581      _Per_CPU_Release( victim_cpu );
582    }
583  }
584
[a7a8ec03]585  _Thread_Scheduler_release_critical( victim_thread, &lock_context );
586
587  _Scheduler_SMP_Allocate_processor(
588    context,
589    scheduled,
590    victim_thread,
[edb020c]591    victim_cpu,
[a7a8ec03]592    allocate_processor
593  );
594
595  return victim_thread;
596}
597
[8f0c7a46]598static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
[238629f]599  Scheduler_Context *context,
[8f0c7a46]600  Scheduler_Node    *filter,
[238629f]601  Chain_Node_order   order
[aea4a91]602)
603{
[238629f]604  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
[494c2e3]605  Chain_Control *scheduled = &self->Scheduled;
[8f0c7a46]606  Scheduler_Node *lowest_scheduled =
607    (Scheduler_Node *) _Chain_Last( scheduled );
[aea4a91]608
[8f0c7a46]609  (void) filter;
610  (void) order;
[aea4a91]611
[5c632c4]612  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );
[351c14d]613  _Assert(
614    _Chain_Next( &lowest_scheduled->Node ) == _Chain_Tail( scheduled )
615  );
[238629f]616
[8f0c7a46]617  return lowest_scheduled;
[aea4a91]618}
619
[5c3d250]620static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
621  Scheduler_Context                *context,
622  Scheduler_Node                   *node,
623  Scheduler_Node                   *lowest_scheduled,
624  Scheduler_SMP_Insert              insert_scheduled,
625  Scheduler_SMP_Move                move_from_scheduled_to_ready,
[27783f6]626  Scheduler_SMP_Allocate_processor  allocate_processor
[5c3d250]627)
628{
629  Thread_Control *needs_help;
[be0366b]630  Scheduler_Try_to_schedule_action action;
[5c3d250]631
[be0366b]632  action = _Scheduler_Try_to_schedule_node(
[5c3d250]633    context,
634    node,
[be0366b]635    _Scheduler_Node_get_idle( lowest_scheduled ),
636    _Scheduler_SMP_Get_idle_thread
[5c3d250]637  );
638
[be0366b]639  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
[a7a8ec03]640    Thread_Control *lowest_scheduled_user;
[be0366b]641    Thread_Control *idle;
[5c3d250]642
[a7a8ec03]643    lowest_scheduled_user = _Scheduler_SMP_Preempt(
[be0366b]644      context,
645      node,
646      lowest_scheduled,
647      allocate_processor
648    );
649
650    ( *insert_scheduled )( context, node );
651    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
652
653    idle = _Scheduler_Release_idle_thread(
654      context,
655      lowest_scheduled,
656      _Scheduler_SMP_Release_idle_thread
657    );
658    if ( idle == NULL ) {
659      needs_help = lowest_scheduled_user;
660    } else {
661      needs_help = NULL;
662    }
663  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
664    _Scheduler_SMP_Node_change_state(
[3a72411]665      lowest_scheduled,
[be0366b]666      SCHEDULER_SMP_NODE_READY
667    );
[3a72411]668    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
[be0366b]669
670    ( *insert_scheduled )( context, node );
671    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
672
673    _Scheduler_Exchange_idle_thread(
674      node,
675      lowest_scheduled,
676      _Scheduler_Node_get_idle( lowest_scheduled )
677    );
678
679    needs_help = NULL;
[5c3d250]680  } else {
[be0366b]681    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
[3a72411]682    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
[5c3d250]683    needs_help = NULL;
684  }
685
686  return needs_help;
687}
688
[c6522a65]689/**
[8f0c7a46]690 * @brief Enqueues a node according to the specified order function.
[c6522a65]691 *
[8f0c7a46]692 * The node must not be in the scheduled state.
[c0bff5e]693 *
[c6522a65]694 * @param[in] context The scheduler instance context.
[8f0c7a46]695 * @param[in] node The node to enqueue.
[8568341]696 * @param[in] needs_help The thread needing help in case the node cannot be
697 *   scheduled.
[c6522a65]698 * @param[in] order The order function.
699 * @param[in] insert_ready Function to insert a node into the set of ready
[238629f]700 *   nodes.
[c6522a65]701 * @param[in] insert_scheduled Function to insert a node into the set of
[238629f]702 *   scheduled nodes.
[c6522a65]703 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
[238629f]704 *   of scheduled nodes to the set of ready nodes.
[8f0c7a46]705 * @param[in] get_lowest_scheduled Function to select the node from the
[82df6f3]706 *   scheduled nodes to replace.  It may not be possible to find one; in this
707 *   case a pointer must be returned so that the order function returns false
708 *   if this pointer is passed as the second argument to the order function.
[8f0c7a46]709 * @param[in] allocate_processor Function to allocate a processor to a node
[238629f]710 *   based on the rules of the scheduler.
[c6522a65]711 */
[8568341]712static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
[d9b54da]713  Scheduler_Context                  *context,
[8f0c7a46]714  Scheduler_Node                     *node,
[8568341]715  Thread_Control                     *needs_help,
[d9b54da]716  Chain_Node_order                    order,
[238629f]717  Scheduler_SMP_Insert                insert_ready,
718  Scheduler_SMP_Insert                insert_scheduled,
719  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
720  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
[27783f6]721  Scheduler_SMP_Allocate_processor    allocate_processor
[48c4a55]722)
[c0bff5e]723{
[8f0c7a46]724  Scheduler_Node *lowest_scheduled =
725    ( *get_lowest_scheduled )( context, node, order );
[c0bff5e]726
[8f0c7a46]727  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
[5c3d250]728    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
[19e41767]729      context,
730      node,
731      lowest_scheduled,
[5c3d250]732      insert_scheduled,
733      move_from_scheduled_to_ready,
[27783f6]734      allocate_processor
[19e41767]735    );
[c0bff5e]736  } else {
[8f0c7a46]737    ( *insert_ready )( context, node );
[c0bff5e]738  }
[8568341]739
740  return needs_help;
[c0bff5e]741}
742
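/*
 * A sketch (not part of this file) of how a scheduler back end might
 * instantiate _Scheduler_SMP_Enqueue_ordered().  The _Scheduler_EXAMPLE_*
 * helpers are hypothetical placeholders for the scheduler-specific ready set
 * operations:
 *
 *   static Thread_Control *_Scheduler_EXAMPLE_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Thread_Control    *needs_help
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       node,
 *       needs_help,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _Scheduler_EXAMPLE_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _Scheduler_EXAMPLE_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */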
743/**
[8f0c7a46]744 * @brief Enqueues a scheduled node according to the specified order
[c0bff5e]745 * function.
746 *
747 * @param[in] context The scheduler instance context.
[8f0c7a46]748 * @param[in] node The node to enqueue.
[c0bff5e]749 * @param[in] order The order function.
[5c3d250]750 * @param[in] extract_from_ready Function to extract a node from the set of
751 *   ready nodes.
[c0bff5e]752 * @param[in] get_highest_ready Function to get the highest ready node.
753 * @param[in] insert_ready Function to insert a node into the set of ready
[238629f]754 *   nodes.
[c0bff5e]755 * @param[in] insert_scheduled Function to insert a node into the set of
[238629f]756 *   scheduled nodes.
[c0bff5e]757 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
[238629f]758 *   of ready nodes to the set of scheduled nodes.
[8f0c7a46]759 * @param[in] allocate_processor Function to allocate a processor to a node
[238629f]760 *   based on the rules of the scheduler.
[c0bff5e]761 */
[8568341]762static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
[d9b54da]763  Scheduler_Context                *context,
[8f0c7a46]764  Scheduler_Node                   *node,
[238629f]765  Chain_Node_order                  order,
[5c3d250]766  Scheduler_SMP_Extract             extract_from_ready,
[238629f]767  Scheduler_SMP_Get_highest_ready   get_highest_ready,
768  Scheduler_SMP_Insert              insert_ready,
769  Scheduler_SMP_Insert              insert_scheduled,
770  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]771  Scheduler_SMP_Allocate_processor  allocate_processor
[c0bff5e]772)
[48c4a55]773{
[d057d653]774  while ( true ) {
775    Scheduler_Node                   *highest_ready;
776    Scheduler_Try_to_schedule_action  action;
[48c4a55]777
[d057d653]778    highest_ready = ( *get_highest_ready )( context, node );
[5c3d250]779
780    /*
781     * The node has been extracted from the scheduled chain.  We have to place
782     * it now on the scheduled or ready set.
783     */
784    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
785      ( *insert_scheduled )( context, node );
[d057d653]786      return NULL;
787    }
[5c3d250]788
[d057d653]789    action = _Scheduler_Try_to_schedule_node(
790      context,
791      highest_ready,
792      _Scheduler_Node_get_idle( node ),
793      _Scheduler_SMP_Get_idle_thread
794    );
795
796    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
[a7a8ec03]797      Thread_Control *user;
[d057d653]798      Thread_Control *idle;
[5c3d250]799
[a7a8ec03]800      user = _Scheduler_SMP_Preempt(
[5c3d250]801        context,
802        highest_ready,
[d057d653]803        node,
804        allocate_processor
[5c3d250]805      );
[c0bff5e]806
[d057d653]807      ( *insert_ready )( context, node );
808      ( *move_from_ready_to_scheduled )( context, highest_ready );
809
810      idle = _Scheduler_Release_idle_thread(
811        context,
812        node,
813        _Scheduler_SMP_Release_idle_thread
814      );
815
816      if ( idle == NULL ) {
817        return user;
[be0366b]818      } else {
[d057d653]819        return NULL;
820      }
821    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
[3a72411]822      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
[d057d653]823      _Scheduler_SMP_Node_change_state(
[3a72411]824        highest_ready,
[d057d653]825        SCHEDULER_SMP_NODE_SCHEDULED
826      );
[19e41767]827
[d057d653]828      ( *insert_ready )( context, node );
829      ( *move_from_ready_to_scheduled )( context, highest_ready );
[8568341]830
[d057d653]831      _Scheduler_Exchange_idle_thread(
832        highest_ready,
833        node,
834        _Scheduler_Node_get_idle( node )
835      );
836      return NULL;
837    } else {
838      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
[be0366b]839
[d057d653]840      _Scheduler_SMP_Node_change_state(
[3a72411]841        highest_ready,
[d057d653]842        SCHEDULER_SMP_NODE_BLOCKED
843      );
[8568341]844
[d057d653]845      ( *extract_from_ready )( context, highest_ready );
846    }
847  }
[48c4a55]848}
849
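/*
 * A corresponding sketch for the scheduled variant, again with hypothetical
 * _Scheduler_EXAMPLE_* ready set helpers:
 *
 *   static Thread_Control *_Scheduler_EXAMPLE_Enqueue_scheduled_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_scheduled_ordered(
 *       context,
 *       node,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _Scheduler_EXAMPLE_Extract_from_ready,
 *       _Scheduler_EXAMPLE_Get_highest_ready,
 *       _Scheduler_EXAMPLE_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _Scheduler_EXAMPLE_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */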
[c0bff5e]850static inline void _Scheduler_SMP_Extract_from_scheduled(
[8f0c7a46]851  Scheduler_Node *node
[c0bff5e]852)
[f39f667a]853{
[8f0c7a46]854  _Chain_Extract_unprotected( &node->Node );
[f39f667a]855}
856
[48c4a55]857static inline void _Scheduler_SMP_Schedule_highest_ready(
[d9b54da]858  Scheduler_Context                *context,
[8f0c7a46]859  Scheduler_Node                   *victim,
[edb020c]860  Per_CPU_Control                  *victim_cpu,
[5c3d250]861  Scheduler_SMP_Extract             extract_from_ready,
[d9b54da]862  Scheduler_SMP_Get_highest_ready   get_highest_ready,
863  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]864  Scheduler_SMP_Allocate_processor  allocate_processor
[48c4a55]865)
866{
[d057d653]867  Scheduler_Try_to_schedule_action action;
868
[be0366b]869  do {
[5c3d250]870    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
871
[be0366b]872    action = _Scheduler_Try_to_schedule_node(
873      context,
874      highest_ready,
875      NULL,
876      _Scheduler_SMP_Get_idle_thread
877    );
878
879    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
[5c3d250]880      _Scheduler_SMP_Allocate_processor(
881        context,
882        highest_ready,
[a7a8ec03]883        _Scheduler_Node_get_user( victim ),
[edb020c]884        victim_cpu,
[5c3d250]885        allocate_processor
886      );
[48c4a55]887
[5c3d250]888      ( *move_from_ready_to_scheduled )( context, highest_ready );
889    } else {
[be0366b]890      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
891
[5c3d250]892      _Scheduler_SMP_Node_change_state(
[3a72411]893        highest_ready,
[5c3d250]894        SCHEDULER_SMP_NODE_BLOCKED
895      );
[19e41767]896
[5c3d250]897      ( *extract_from_ready )( context, highest_ready );
898    }
[d057d653]899  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
[48c4a55]900}
901
[c6522a65]902/**
[f39f667a]903 * @brief Blocks a thread.
[c6522a65]904 *
905 * @param[in] context The scheduler instance context.
906 * @param[in] thread The thread of the scheduling operation.
[e382a1b]907 * @param[in] node The scheduler node of the thread to block.
[f39f667a]908 * @param[in] extract_from_ready Function to extract a node from the set of
[5c3d250]909 *   ready nodes.
[c6522a65]910 * @param[in] get_highest_ready Function to get the highest ready node.
911 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
[5c3d250]912 *   of ready nodes to the set of scheduled nodes.
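 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.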
[c6522a65]913 */
[f39f667a]914static inline void _Scheduler_SMP_Block(
[d9b54da]915  Scheduler_Context                *context,
916  Thread_Control                   *thread,
[e382a1b]917  Scheduler_Node                   *node,
[d9b54da]918  Scheduler_SMP_Extract             extract_from_ready,
919  Scheduler_SMP_Get_highest_ready   get_highest_ready,
920  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]921  Scheduler_SMP_Allocate_processor  allocate_processor
[48c4a55]922)
923{
[edb020c]924  Scheduler_SMP_Node_state  node_state;
925  Per_CPU_Control          *thread_cpu;
[cceb19f4]926
[e382a1b]927  node_state = _Scheduler_SMP_Node_state( node );
[cceb19f4]928
[edb020c]929  thread_cpu = _Scheduler_Block_node(
[5c3d250]930    context,
[cceb19f4]931    thread,
[e382a1b]932    node,
933    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
[27783f6]934    _Scheduler_SMP_Get_idle_thread
[5c3d250]935  );
[351c14d]936
[edb020c]937  if ( thread_cpu != NULL ) {
[3a72411]938    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
[beab7329]939
[e382a1b]940    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
941      _Scheduler_SMP_Extract_from_scheduled( node );
[5c3d250]942      _Scheduler_SMP_Schedule_highest_ready(
943        context,
[e382a1b]944        node,
[edb020c]945        thread_cpu,
[5c3d250]946        extract_from_ready,
947        get_highest_ready,
948        move_from_ready_to_scheduled,
[27783f6]949        allocate_processor
[5c3d250]950      );
[351c14d]951    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
[e382a1b]952      ( *extract_from_ready )( context, node );
[5c3d250]953    }
[48c4a55]954  }
955}
956
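/*
 * A sketch of how a scheduler back end might expose the block operation to
 * the scheduler framework.  The _Scheduler_EXAMPLE_* helpers are
 * hypothetical, and the entry point signature is the one assumed here for
 * SMP schedulers:
 *
 *   void _Scheduler_EXAMPLE_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *     _Scheduler_SMP_Block(
 *       context,
 *       thread,
 *       node,
 *       _Scheduler_EXAMPLE_Extract_from_ready,
 *       _Scheduler_EXAMPLE_Get_highest_ready,
 *       _Scheduler_EXAMPLE_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */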
[8568341]957static inline Thread_Control *_Scheduler_SMP_Unblock(
[9bfad8c]958  Scheduler_Context     *context,
959  Thread_Control        *thread,
[72e0bdb]960  Scheduler_Node        *node,
[9bfad8c]961  Scheduler_SMP_Update   update,
962  Scheduler_SMP_Enqueue  enqueue_fifo
[c0bff5e]963)
964{
[72e0bdb]965  Scheduler_SMP_Node_state  node_state;
966  bool                      unblock;
967  Thread_Control           *needs_help;
[9bfad8c]968
[72e0bdb]969  node_state = _Scheduler_SMP_Node_state( node );
[9bfad8c]970  unblock = _Scheduler_Unblock_node(
[5c3d250]971    context,
972    thread,
[72e0bdb]973    node,
974    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
[27783f6]975    _Scheduler_SMP_Release_idle_thread
[5c3d250]976  );
[c0bff5e]977
[5c3d250]978  if ( unblock ) {
[9bfad8c]979    Priority_Control new_priority;
980    bool             prepend_it;
981
[72e0bdb]982    new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
[9bfad8c]983    (void) prepend_it;
984
[72e0bdb]985    if ( new_priority != _Scheduler_SMP_Node_priority( node ) ) {
986      ( *update )( context, node, new_priority );
[9bfad8c]987    }
988
[72e0bdb]989    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
[3a72411]990      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
[cceb19f4]991
[72e0bdb]992      needs_help = ( *enqueue_fifo )( context, node, thread );
[cceb19f4]993    } else {
[72e0bdb]994      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
[79569ae]995      _Assert(
[72e0bdb]996        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
997          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
[79569ae]998      );
[72e0bdb]999      _Assert( node->idle == NULL );
[c0bff5e]1000
[72e0bdb]1001      if ( node->accepts_help == thread ) {
[cceb19f4]1002        needs_help = thread;
1003      } else {
1004        needs_help = NULL;
1005      }
1006    }
[5c3d250]1007  } else {
1008    needs_help = NULL;
1009  }
1010
1011  return needs_help;
[c0bff5e]1012}
1013
[9c238e1]1014static inline void _Scheduler_SMP_Update_priority(
[8568341]1015  Scheduler_Context               *context,
1016  Thread_Control                  *thread,
[501043a]1017  Scheduler_Node                  *node,
[8568341]1018  Scheduler_SMP_Extract            extract_from_ready,
1019  Scheduler_SMP_Update             update,
1020  Scheduler_SMP_Enqueue            enqueue_fifo,
1021  Scheduler_SMP_Enqueue            enqueue_lifo,
1022  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo,
[351c14d]1023  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo,
1024  Scheduler_SMP_Ask_for_help       ask_for_help
[48c4a55]1025)
1026{
[501043a]1027  Priority_Control         new_priority;
1028  bool                     prepend_it;
1029  Scheduler_SMP_Node_state node_state;
[9bfad8c]1030
[501043a]1031  new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
[9bfad8c]1032
[501043a]1033  if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
[351c14d]1034    if ( _Thread_Is_ready( thread ) ) {
1035      ( *ask_for_help )( context, thread, node );
1036    }
1037
[9c238e1]1038    return;
[9bfad8c]1039  }
[a336d51]1040
[501043a]1041  node_state = _Scheduler_SMP_Node_state( node );
1042
1043  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1044    _Scheduler_SMP_Extract_from_scheduled( node );
[c0bff5e]1045
[501043a]1046    ( *update )( context, node, new_priority );
[c0bff5e]1047
1048    if ( prepend_it ) {
[9c238e1]1049      ( *enqueue_scheduled_lifo )( context, node );
[c0bff5e]1050    } else {
[9c238e1]1051      ( *enqueue_scheduled_fifo )( context, node );
[c0bff5e]1052    }
[501043a]1053  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1054    ( *extract_from_ready )( context, node );
[48c4a55]1055
[501043a]1056    ( *update )( context, node, new_priority );
[f39f667a]1057
[c0bff5e]1058    if ( prepend_it ) {
[9c238e1]1059      ( *enqueue_lifo )( context, node, NULL );
[c0bff5e]1060    } else {
[9c238e1]1061      ( *enqueue_fifo )( context, node, NULL );
[c0bff5e]1062    }
[5c3d250]1063  } else {
[501043a]1064    ( *update )( context, node, new_priority );
[5c3d250]1065
[351c14d]1066    if ( _Thread_Is_ready( thread ) ) {
1067      ( *ask_for_help )( context, thread, node );
1068    }
[f39f667a]1069  }
[48c4a55]1070}
1071
[d097b546]1072static inline Thread_Control *_Scheduler_SMP_Ask_for_help_X(
[5c3d250]1073  Scheduler_Context                  *context,
1074  Thread_Control                     *offers_help,
1075  Thread_Control                     *needs_help,
[27783f6]1076  Scheduler_SMP_Enqueue               enqueue_fifo
[5c3d250]1077)
1078{
1079  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
1080  Thread_Control *next_needs_help = NULL;
1081  Thread_Control *previous_accepts_help;
1082
1083  previous_accepts_help = node->Base.accepts_help;
1084  node->Base.accepts_help = needs_help;
1085
1086  switch ( node->state ) {
1087    case SCHEDULER_SMP_NODE_READY:
1088      next_needs_help =
1089        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
1090      break;
1091    case SCHEDULER_SMP_NODE_SCHEDULED:
1092      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
1093        context,
1094        &node->Base,
1095        offers_help,
1096        needs_help,
1097        previous_accepts_help,
[27783f6]1098        _Scheduler_SMP_Release_idle_thread
[5c3d250]1099      );
1100      break;
1101    case SCHEDULER_SMP_NODE_BLOCKED:
1102      if (
1103        _Scheduler_Ask_blocked_node_for_help(
1104          context,
1105          &node->Base,
1106          offers_help,
1107          needs_help
1108        )
1109      ) {
[3a72411]1110        _Scheduler_SMP_Node_change_state(
1111          &node->Base,
1112          SCHEDULER_SMP_NODE_READY
1113        );
[5c3d250]1114
1115        next_needs_help = ( *enqueue_fifo )(
1116          context,
1117          &node->Base,
1118          needs_help
1119        );
1120      }
1121      break;
1122  }
1123
1124  return next_needs_help;
1125}
1126
[8568341]1127static inline Thread_Control *_Scheduler_SMP_Yield(
1128  Scheduler_Context               *context,
1129  Thread_Control                  *thread,
[2df4abc]1130  Scheduler_Node                  *node,
[8568341]1131  Scheduler_SMP_Extract            extract_from_ready,
1132  Scheduler_SMP_Enqueue            enqueue_fifo,
1133  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo
[701dd96f]1134)
1135{
[6a82f1ae]1136  Thread_Control           *needs_help;
1137  Scheduler_SMP_Node_state  node_state;
1138
1139  node_state = _Scheduler_SMP_Node_state( node );
[701dd96f]1140
[6a82f1ae]1141  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
[2df4abc]1142    _Scheduler_SMP_Extract_from_scheduled( node );
[701dd96f]1143
[2df4abc]1144    needs_help = ( *enqueue_scheduled_fifo )( context, node );
[6a82f1ae]1145  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
[2df4abc]1146    ( *extract_from_ready )( context, node );
[701dd96f]1147
[2df4abc]1148    needs_help = ( *enqueue_fifo )( context, node, NULL );
[6a82f1ae]1149  } else {
1150    needs_help = thread;
[701dd96f]1151  }
[8568341]1152
1153  return needs_help;
[701dd96f]1154}
1155
[48c4a55]1156static inline void _Scheduler_SMP_Insert_scheduled_lifo(
[3730a07f]1157  Scheduler_Context *context,
[8f0c7a46]1158  Scheduler_Node    *node_to_insert
[48c4a55]1159)
1160{
[3730a07f]1161  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1162
[48c4a55]1163  _Chain_Insert_ordered_unprotected(
[494c2e3]1164    &self->Scheduled,
[8f0c7a46]1165    &node_to_insert->Node,
1166    _Scheduler_SMP_Insert_priority_lifo_order
[48c4a55]1167  );
1168}
1169
1170static inline void _Scheduler_SMP_Insert_scheduled_fifo(
[3730a07f]1171  Scheduler_Context *context,
[8f0c7a46]1172  Scheduler_Node    *node_to_insert
[48c4a55]1173)
1174{
[3730a07f]1175  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1176
[48c4a55]1177  _Chain_Insert_ordered_unprotected(
[494c2e3]1178    &self->Scheduled,
[8f0c7a46]1179    &node_to_insert->Node,
1180    _Scheduler_SMP_Insert_priority_fifo_order
[48c4a55]1181  );
1182}
1183
[351c14d]1184static inline bool _Scheduler_SMP_Ask_for_help(
1185  Scheduler_Context                  *context,
1186  Thread_Control                     *thread,
1187  Scheduler_Node                     *node,
1188  Chain_Node_order                    order,
1189  Scheduler_SMP_Insert                insert_ready,
1190  Scheduler_SMP_Insert                insert_scheduled,
1191  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
1192  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
1193  Scheduler_SMP_Allocate_processor    allocate_processor
1194)
1195{
1196  Scheduler_Node   *lowest_scheduled;
1197  ISR_lock_Context  lock_context;
1198  bool              success;
1199
1200  lowest_scheduled = ( *get_lowest_scheduled )( context, node, order );
1201
1202  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1203
1204  if (
1205    thread->Scheduler.state == THREAD_SCHEDULER_READY
1206      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_BLOCKED
1207  ) {
1208    if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
1209      _Thread_Scheduler_cancel_need_for_help(
1210        thread,
1211        _Thread_Get_CPU( thread )
1212      );
1213      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1214      _Thread_Scheduler_release_critical( thread, &lock_context );
1215
1216      _Scheduler_SMP_Preempt(
1217        context,
1218        node,
1219        lowest_scheduled,
1220        allocate_processor
1221      );
1222
1223      ( *insert_scheduled )( context, node );
1224      ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
1225
1226      _Scheduler_Release_idle_thread(
1227        context,
1228        lowest_scheduled,
1229        _Scheduler_SMP_Release_idle_thread
1230      );
1231      success = true;
1232    } else {
1233      _Thread_Scheduler_release_critical( thread, &lock_context );
1234      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
1235      ( *insert_ready )( context, node );
1236      success = false;
1237    }
1238  } else {
1239    _Thread_Scheduler_release_critical( thread, &lock_context );
1240    success = false;
1241  }
1242
1243  return success;
1244}
1245
1246static inline void _Scheduler_SMP_Reconsider_help_request(
1247  Scheduler_Context     *context,
1248  Thread_Control        *thread,
1249  Scheduler_Node        *node,
1250  Scheduler_SMP_Extract  extract_from_ready
1251)
1252{
1253  ISR_lock_Context lock_context;
1254
1255  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1256
1257  if (
1258    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
1259      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
1260  ) {
1261    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1262    ( *extract_from_ready )( context, node );
1263  }
1264
1265  _Thread_Scheduler_release_critical( thread, &lock_context );
1266}
1267
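/**
 * @brief Withdraws the node from the thread, i.e. makes it inactive in this
 *   scheduler instance.
 *
 * If the node was scheduled, the highest ready node takes over the processor;
 * if it was ready, it is simply extracted from the ready set.
 */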
1268static inline void _Scheduler_SMP_Withdraw_node(
1269  Scheduler_Context                *context,
1270  Thread_Control                   *thread,
1271  Scheduler_Node                   *node,
1272  Thread_Scheduler_state            next_state,
1273  Scheduler_SMP_Extract             extract_from_ready,
1274  Scheduler_SMP_Get_highest_ready   get_highest_ready,
1275  Scheduler_SMP_Move                move_from_ready_to_scheduled,
1276  Scheduler_SMP_Allocate_processor  allocate_processor
1277)
1278{
1279  ISR_lock_Context         lock_context;
1280  Scheduler_SMP_Node_state node_state;
1281
1282  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1283
1284  node_state = _Scheduler_SMP_Node_state( node );
1285  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1286
1287  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1288    Per_CPU_Control *thread_cpu;
1289
1290    thread_cpu = _Thread_Get_CPU( thread );
1291    _Scheduler_Thread_change_state( thread, next_state );
1292    _Thread_Scheduler_release_critical( thread, &lock_context );
1293
1294    _Scheduler_SMP_Extract_from_scheduled( node );
1295    _Scheduler_SMP_Schedule_highest_ready(
1296      context,
1297      node,
1298      thread_cpu,
1299      extract_from_ready,
1300      get_highest_ready,
1301      move_from_ready_to_scheduled,
1302      allocate_processor
1303    );
1304  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1305    _Thread_Scheduler_release_critical( thread, &lock_context );
1306    ( *extract_from_ready )( context, node );
1307  } else {
1308    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
1309    _Thread_Scheduler_release_critical( thread, &lock_context );
1310  }
1311}
1312
[9d83f58a]1313/** @} */
1314
1315#ifdef __cplusplus
1316}
1317#endif /* __cplusplus */
1318
1319#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */