source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ 63e2ca1b

Last change on this file was 63e2ca1b, checked in by Sebastian Huber <sebastian.huber@…>, on 10/31/16 at 08:13:35

score: Simplify yield and unblock scheduler ops

Update #2556.

[9d83f58a]1/**
2 * @file
3 *
4 * @brief SMP Scheduler Implementation
5 *
6 * @ingroup ScoreSchedulerSMP
7 */
8
9/*
[351c14d]10 * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
[9d83f58a]11 *
12 *  embedded brains GmbH
13 *  Dornierstr. 4
14 *  82178 Puchheim
15 *  Germany
16 *  <rtems@embedded-brains.de>
17 *
18 * The license and distribution terms for this file may be
19 * found in the file LICENSE in this distribution or at
[c499856]20 * http://www.rtems.org/license/LICENSE.
[9d83f58a]21 */
22
23#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25
26#include <rtems/score/schedulersmp.h>
[38b59a6]27#include <rtems/score/assert.h>
[48c4a55]28#include <rtems/score/chainimpl.h>
[38b59a6]29#include <rtems/score/schedulersimpleimpl.h>
[351c14d]30#include <rtems/bspIo.h>
[9d83f58a]31
32#ifdef __cplusplus
33extern "C" {
34#endif /* __cplusplus */
35
36/**
37 * @addtogroup ScoreSchedulerSMP
38 *
[c6522a65]39 * The scheduler nodes can be in three states
40 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
[f39f667a]41 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
42 * - @ref SCHEDULER_SMP_NODE_READY.
[c6522a65]43 *
[f39f667a]44 * State transitions are triggered via basic operations
[c0bff5e]45 * - _Scheduler_SMP_Enqueue_ordered(),
46 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
[f39f667a]47 * - _Scheduler_SMP_Block().
[c6522a65]48 *
49 * @dot
50 * digraph {
51 *   node [style="filled"];
52 *
53 *   bs [label="BLOCKED"];
54 *   ss [label="SCHEDULED", fillcolor="green"];
55 *   rs [label="READY", fillcolor="red"];
56 *
57 *   edge [label="enqueue"];
58 *   edge [fontcolor="darkgreen", color="darkgreen"];
59 *
60 *   bs -> ss;
61 *
62 *   edge [fontcolor="red", color="red"];
63 *
64 *   bs -> rs;
65 *
66 *   edge [label="enqueue other"];
67 *
68 *   ss -> rs;
69 *
[f39f667a]70 *   edge [label="block"];
[c6522a65]71 *   edge [fontcolor="black", color="black"];
72 *
[b532bb2c]73 *   ss -> bs;
[c6522a65]74 *   rs -> bs;
75 *
[f39f667a]76 *   edge [label="block other"];
[c6522a65]77 *   edge [fontcolor="darkgreen", color="darkgreen"];
78 *
79 *   rs -> ss;
80 * }
81 * @enddot
82 *
83 * During system initialization each processor of the scheduler instance starts
84 * with an idle thread assigned to it.  Let's have a look at an example with two
85 * idle threads I and J with priority 5.  We also have blocked threads A, B and
[2d96533]86 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
87 * with respect to the thread priority from left to right in the below
88 * diagrams.  The highest priority node (lowest priority number) is the
89 * leftmost node.  Since the processor assignment is independent of the thread
90 * priority, the processor indices may change from one diagram to the next.
[c6522a65]91 *
92 * @dot
93 * digraph {
94 *   node [style="filled"];
95 *   edge [dir="none"];
96 *   subgraph {
97 *     rank = same;
98 *
99 *     i [label="I (5)", fillcolor="green"];
100 *     j [label="J (5)", fillcolor="green"];
101 *     a [label="A (1)"];
102 *     b [label="B (2)"];
103 *     c [label="C (3)"];
104 *     i -> j;
105 *   }
106 *
107 *   subgraph {
108 *     rank = same;
109 *
110 *     p0 [label="PROCESSOR 0", shape="box"];
111 *     p1 [label="PROCESSOR 1", shape="box"];
112 *   }
113 *
114 *   i -> p0;
115 *   j -> p1;
116 * }
117 * @enddot
118 *
119 * Let's start A.  For this, an enqueue operation is performed.
120 *
121 * @dot
122 * digraph {
123 *   node [style="filled"];
124 *   edge [dir="none"];
125 *
126 *   subgraph {
127 *     rank = same;
128 *
129 *     i [label="I (5)", fillcolor="green"];
130 *     j [label="J (5)", fillcolor="red"];
131 *     a [label="A (1)", fillcolor="green"];
132 *     b [label="B (2)"];
133 *     c [label="C (3)"];
134 *     a -> i;
135 *   }
136 *
137 *   subgraph {
138 *     rank = same;
139 *
140 *     p0 [label="PROCESSOR 0", shape="box"];
141 *     p1 [label="PROCESSOR 1", shape="box"];
142 *   }
143 *
144 *   i -> p0;
145 *   a -> p1;
146 * }
147 * @enddot
148 *
149 * Let's start C.
150 *
151 * @dot
152 * digraph {
153 *   node [style="filled"];
154 *   edge [dir="none"];
155 *
156 *   subgraph {
157 *     rank = same;
158 *
159 *     a [label="A (1)", fillcolor="green"];
160 *     c [label="C (3)", fillcolor="green"];
161 *     i [label="I (5)", fillcolor="red"];
162 *     j [label="J (5)", fillcolor="red"];
163 *     b [label="B (2)"];
164 *     a -> c;
165 *     i -> j;
166 *   }
167 *
168 *   subgraph {
169 *     rank = same;
170 *
171 *     p0 [label="PROCESSOR 0", shape="box"];
172 *     p1 [label="PROCESSOR 1", shape="box"];
173 *   }
174 *
175 *   c -> p0;
176 *   a -> p1;
177 * }
178 * @enddot
179 *
180 * Let's start B.
181 *
182 * @dot
183 * digraph {
184 *   node [style="filled"];
185 *   edge [dir="none"];
186 *
187 *   subgraph {
188 *     rank = same;
189 *
190 *     a [label="A (1)", fillcolor="green"];
191 *     b [label="B (2)", fillcolor="green"];
192 *     c [label="C (3)", fillcolor="red"];
193 *     i [label="I (5)", fillcolor="red"];
194 *     j [label="J (5)", fillcolor="red"];
195 *     a -> b;
196 *     c -> i -> j;
197 *   }
198 *
199 *   subgraph {
200 *     rank = same;
201 *
202 *     p0 [label="PROCESSOR 0", shape="box"];
203 *     p1 [label="PROCESSOR 1", shape="box"];
204 *   }
205 *
206 *   b -> p0;
207 *   a -> p1;
208 * }
209 * @enddot
210 *
[f39f667a]211 * Let's change the priority of thread A to 4.
[c6522a65]212 *
213 * @dot
214 * digraph {
215 *   node [style="filled"];
216 *   edge [dir="none"];
217 *
218 *   subgraph {
219 *     rank = same;
220 *
221 *     b [label="B (2)", fillcolor="green"];
222 *     c [label="C (3)", fillcolor="green"];
223 *     a [label="A (4)", fillcolor="red"];
224 *     i [label="I (5)", fillcolor="red"];
225 *     j [label="J (5)", fillcolor="red"];
226 *     b -> c;
227 *     a -> i -> j;
228 *   }
229 *
230 *   subgraph {
231 *     rank = same;
232 *
233 *     p0 [label="PROCESSOR 0", shape="box"];
234 *     p1 [label="PROCESSOR 1", shape="box"];
235 *   }
236 *
237 *   b -> p0;
238 *   c -> p1;
239 * }
240 * @enddot
241 *
[f39f667a]242 * Now perform a blocking operation with thread B.  Please note that thread A
243 * now migrated from processor 1 to processor 0 and thread C still executes on
244 * processor 1.
[c6522a65]245 *
246 * @dot
247 * digraph {
248 *   node [style="filled"];
249 *   edge [dir="none"];
250 *
251 *   subgraph {
252 *     rank = same;
253 *
254 *     c [label="C (3)", fillcolor="green"];
[f39f667a]255 *     a [label="A (4)", fillcolor="green"];
[c6522a65]256 *     i [label="I (5)", fillcolor="red"];
257 *     j [label="J (5)", fillcolor="red"];
[f39f667a]258 *     b [label="B (2)"];
259 *     c -> a;
[c6522a65]260 *     i -> j;
261 *   }
262 *
263 *   subgraph {
264 *     rank = same;
265 *
266 *     p0 [label="PROCESSOR 0", shape="box"];
267 *     p1 [label="PROCESSOR 1", shape="box"];
268 *   }
269 *
[f39f667a]270 *   a -> p0;
[c6522a65]271 *   c -> p1;
272 * }
273 * @enddot
274 *
[9d83f58a]275 * @{
276 */
277
[8f0c7a46]278typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
[238629f]279  Scheduler_Context *context,
[8f0c7a46]280  Scheduler_Node    *node
[238629f]281);
282
[8f0c7a46]283typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
[238629f]284  Scheduler_Context *context,
[8f0c7a46]285  Scheduler_Node    *filter,
[238629f]286  Chain_Node_order   order
[48c4a55]287);
288
289typedef void ( *Scheduler_SMP_Extract )(
[3730a07f]290  Scheduler_Context *context,
[8f0c7a46]291  Scheduler_Node    *node_to_extract
[48c4a55]292);
293
294typedef void ( *Scheduler_SMP_Insert )(
[3730a07f]295  Scheduler_Context *context,
[8f0c7a46]296  Scheduler_Node    *node_to_insert
[48c4a55]297);
298
299typedef void ( *Scheduler_SMP_Move )(
[3730a07f]300  Scheduler_Context *context,
[8f0c7a46]301  Scheduler_Node    *node_to_move
[48c4a55]302);
303
[351c14d]304typedef bool ( *Scheduler_SMP_Ask_for_help )(
305  Scheduler_Context *context,
306  Thread_Control    *thread,
307  Scheduler_Node    *node
308);
309
[f39f667a]310typedef void ( *Scheduler_SMP_Update )(
311  Scheduler_Context *context,
[8f0c7a46]312  Scheduler_Node    *node_to_update,
[d9b54da]313  Priority_Control   new_priority
[f39f667a]314);
315
[63e2ca1b]316typedef bool ( *Scheduler_SMP_Enqueue )(
[f39f667a]317  Scheduler_Context *context,
[8f0c7a46]318  Scheduler_Node    *node_to_enqueue
[f39f667a]319);
320
[238629f]321typedef void ( *Scheduler_SMP_Allocate_processor )(
[8f0c7a46]322  Scheduler_Context *context,
[edb020c]323  Thread_Control    *scheduled_thread,
324  Thread_Control    *victim_thread,
325  Per_CPU_Control   *victim_cpu
[238629f]326);
327
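/*
 * Order predicates for priority-ordered chain insertion.  Both return true if
 * the node to insert should be placed before the next node.  The LIFO variant
 * uses "<=" so that a node is inserted in front of nodes with the same
 * priority, while the FIFO variant uses "<" and therefore places it behind
 * them.  Lower priority values indicate higher importance.
 */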
[8f0c7a46]328static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
329  const Chain_Node *to_insert,
330  const Chain_Node *next
331)
332{
333  const Scheduler_SMP_Node *node_to_insert =
334    (const Scheduler_SMP_Node *) to_insert;
335  const Scheduler_SMP_Node *node_next =
336    (const Scheduler_SMP_Node *) next;
337
338  return node_to_insert->priority <= node_next->priority;
339}
340
341static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
342  const Chain_Node *to_insert,
343  const Chain_Node *next
344)
345{
346  const Scheduler_SMP_Node *node_to_insert =
347    (const Scheduler_SMP_Node *) to_insert;
348  const Scheduler_SMP_Node *node_next =
349    (const Scheduler_SMP_Node *) next;
350
351  return node_to_insert->priority < node_next->priority;
352}
353
[3730a07f]354static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
355  Scheduler_Context *context
356)
357{
358  return (Scheduler_SMP_Context *) context;
359}
360
[494c2e3]361static inline void _Scheduler_SMP_Initialize(
[e1598a6]362  Scheduler_SMP_Context *self
[494c2e3]363)
[9d83f58a]364{
[494c2e3]365  _Chain_Initialize_empty( &self->Scheduled );
[5c3d250]366  _Chain_Initialize_empty( &self->Idle_threads );
[9d83f58a]367}
368
[08d9760]369static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
[beab7329]370  Thread_Control *thread
371)
372{
[c0f1f52]373  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
[beab7329]374}
375
[5c3d250]376static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
377  Thread_Control *thread
378)
379{
[7f742432]380  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
[5c3d250]381}
382
[8f0c7a46]383static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
384  Scheduler_Node *node
385)
386{
387  return (Scheduler_SMP_Node *) node;
388}
389
[501043a]390static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
391  const Scheduler_Node *node
392)
393{
394  return ( (const Scheduler_SMP_Node *) node )->state;
395}
396
397static inline Priority_Control _Scheduler_SMP_Node_priority(
398  const Scheduler_Node *node
399)
400{
401  return ( (const Scheduler_SMP_Node *) node )->priority;
402}
403
[beab7329]404static inline void _Scheduler_SMP_Node_initialize(
[300f6a48]405  const Scheduler_Control *scheduler,
406  Scheduler_SMP_Node      *node,
407  Thread_Control          *thread,
408  Priority_Control         priority
[beab7329]409)
410{
[300f6a48]411  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
[beab7329]412  node->state = SCHEDULER_SMP_NODE_BLOCKED;
[9bfad8c]413  node->priority = priority;
[beab7329]414}
415
[8f0c7a46]416static inline void _Scheduler_SMP_Node_update_priority(
417  Scheduler_SMP_Node *node,
418  Priority_Control    new_priority
419)
420{
421  node->priority = new_priority;
422}
423
[f39f667a]424extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];
[beab7329]425
426static inline void _Scheduler_SMP_Node_change_state(
[3a72411]427  Scheduler_Node           *node,
428  Scheduler_SMP_Node_state  new_state
[beab7329]429)
430{
[3a72411]431  Scheduler_SMP_Node *the_node;
432
433  the_node = _Scheduler_SMP_Node_downcast( node );
[beab7329]434  _Assert(
[3a72411]435    _Scheduler_SMP_Node_valid_state_changes[ the_node->state ][ new_state ]
[beab7329]436  );
437
[3a72411]438  the_node->state = new_state;
[beab7329]439}
440
[38b59a6]441static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
[8f0c7a46]442  const Scheduler_Context *context,
443  const Per_CPU_Control   *cpu
[38b59a6]444)
445{
[8f0c7a46]446  return cpu->scheduler_context == context;
[38b59a6]447}
448
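/*
 * Each scheduler instance maintains its own pool of idle threads in
 * self->Idle_threads.  _Scheduler_SMP_Get_idle_thread() takes the first idle
 * thread from this pool (the pool must not be empty), and
 * _Scheduler_SMP_Release_idle_thread() returns an idle thread to it.
 */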
[5c3d250]449static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
[27783f6]450  Scheduler_Context *context
[5c3d250]451)
452{
453  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
454  Thread_Control *idle = (Thread_Control *)
455    _Chain_Get_first_unprotected( &self->Idle_threads );
456
457  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
458
459  return idle;
460}
461
462static inline void _Scheduler_SMP_Release_idle_thread(
[27783f6]463  Scheduler_Context *context,
464  Thread_Control    *idle
[5c3d250]465)
466{
467  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
468
469  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
470}
471
[19e41767]472static inline void _Scheduler_SMP_Allocate_processor_lazy(
[8f0c7a46]473  Scheduler_Context *context,
[19e41767]474  Thread_Control    *scheduled_thread,
[edb020c]475  Thread_Control    *victim_thread,
476  Per_CPU_Control   *victim_cpu
[fc2ad63]477)
478{
[8f0c7a46]479  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
[38b59a6]480  Per_CPU_Control *cpu_self = _Per_CPU_Get();
[fc2ad63]481  Thread_Control *heir;
482
[38b59a6]483  _Assert( _ISR_Get_level() != 0 );
[fc2ad63]484
[8f0c7a46]485  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
486    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
487      heir = scheduled_cpu->heir;
[835b88b]488      _Thread_Dispatch_update_heir(
[8f0c7a46]489        cpu_self,
490        scheduled_cpu,
491        scheduled_thread
492      );
[38b59a6]493    } else {
494      /* We have to force a migration to our processor set */
[8f0c7a46]495      heir = scheduled_thread;
[38b59a6]496    }
[fc2ad63]497  } else {
[8f0c7a46]498    heir = scheduled_thread;
[fc2ad63]499  }
500
[8f0c7a46]501  if ( heir != victim_thread ) {
502    _Thread_Set_CPU( heir, victim_cpu );
[835b88b]503    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
[fc2ad63]504  }
505}
506
[09c87fb]507/*
508 * This method is slightly different from
509 * _Scheduler_SMP_Allocate_processor_lazy() in that it allocates exactly the
510 * processor it is asked to allocate.  _Scheduler_SMP_Allocate_processor_lazy()
511 * attempts to prevent migrations but does not take affinity into account.
512 */
513static inline void _Scheduler_SMP_Allocate_processor_exact(
514  Scheduler_Context *context,
515  Thread_Control    *scheduled_thread,
[edb020c]516  Thread_Control    *victim_thread,
517  Per_CPU_Control   *victim_cpu
[09c87fb]518)
519{
520  Per_CPU_Control *cpu_self = _Per_CPU_Get();
521
522  (void) context;
523
524  _Thread_Set_CPU( scheduled_thread, victim_cpu );
525  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
526}
527
[19e41767]528static inline void _Scheduler_SMP_Allocate_processor(
529  Scheduler_Context                *context,
530  Scheduler_Node                   *scheduled,
[a7a8ec03]531  Thread_Control                   *victim_thread,
[edb020c]532  Per_CPU_Control                  *victim_cpu,
[19e41767]533  Scheduler_SMP_Allocate_processor  allocate_processor
534)
535{
[ac532f3]536  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
[19e41767]537
[3a72411]538  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
[19e41767]539
[edb020c]540  ( *allocate_processor )(
541    context,
542    scheduled_thread,
543    victim_thread,
544    victim_cpu
545  );
[19e41767]546}
547
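/*
 * Preempts the victim node in favour of the scheduled node: the victim node
 * becomes ready, the victim thread is registered on its processor's
 * Threads_in_need_for_help chain in case other scheduler nodes may help it,
 * and the processor is handed over through the allocate_processor callback.
 * Returns the victim thread.
 */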
[a7a8ec03]548static inline Thread_Control *_Scheduler_SMP_Preempt(
549  Scheduler_Context                *context,
550  Scheduler_Node                   *scheduled,
551  Scheduler_Node                   *victim,
552  Scheduler_SMP_Allocate_processor  allocate_processor
553)
554{
555  Thread_Control   *victim_thread;
556  ISR_lock_Context  lock_context;
[edb020c]557  Per_CPU_Control  *victim_cpu;
[a7a8ec03]558
559  victim_thread = _Scheduler_Node_get_user( victim );
560  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
561
562  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
[351c14d]563
[edb020c]564  victim_cpu = _Thread_Get_CPU( victim_thread );
[351c14d]565
566  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
567    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
568
569    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
570      _Per_CPU_Acquire( victim_cpu );
571      _Chain_Append_unprotected(
572        &victim_cpu->Threads_in_need_for_help,
573        &victim_thread->Scheduler.Help_node
574      );
575      _Per_CPU_Release( victim_cpu );
576    }
577  }
578
[a7a8ec03]579  _Thread_Scheduler_release_critical( victim_thread, &lock_context );
580
581  _Scheduler_SMP_Allocate_processor(
582    context,
583    scheduled,
584    victim_thread,
[edb020c]585    victim_cpu,
[a7a8ec03]586    allocate_processor
587  );
588
589  return victim_thread;
590}
591
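/*
 * The scheduled chain is ordered by priority, so the lowest priority scheduled
 * node is simply the last node of the chain.  The filter and order arguments
 * are unused here; they exist for schedulers with more elaborate selection
 * rules (for example processor affinity).
 */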
[8f0c7a46]592static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
[238629f]593  Scheduler_Context *context,
[8f0c7a46]594  Scheduler_Node    *filter,
[238629f]595  Chain_Node_order   order
[aea4a91]596)
597{
[238629f]598  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
[494c2e3]599  Chain_Control *scheduled = &self->Scheduled;
[8f0c7a46]600  Scheduler_Node *lowest_scheduled =
601    (Scheduler_Node *) _Chain_Last( scheduled );
[aea4a91]602
[8f0c7a46]603  (void) filter;
604  (void) order;
[aea4a91]605
[5c632c4]606  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );
[351c14d]607  _Assert(
608    _Chain_Next( &lowest_scheduled->Node ) == _Chain_Tail( scheduled )
609  );
[238629f]610
[8f0c7a46]611  return lowest_scheduled;
[aea4a91]612}
613
[63e2ca1b]614static inline void _Scheduler_SMP_Enqueue_to_scheduled(
[5c3d250]615  Scheduler_Context                *context,
616  Scheduler_Node                   *node,
617  Scheduler_Node                   *lowest_scheduled,
618  Scheduler_SMP_Insert              insert_scheduled,
619  Scheduler_SMP_Move                move_from_scheduled_to_ready,
[27783f6]620  Scheduler_SMP_Allocate_processor  allocate_processor
[5c3d250]621)
622{
[be0366b]623  Scheduler_Try_to_schedule_action action;
[5c3d250]624
[be0366b]625  action = _Scheduler_Try_to_schedule_node(
[5c3d250]626    context,
627    node,
[be0366b]628    _Scheduler_Node_get_idle( lowest_scheduled ),
629    _Scheduler_SMP_Get_idle_thread
[5c3d250]630  );
631
[be0366b]632  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
[63e2ca1b]633    _Scheduler_SMP_Preempt(
[be0366b]634      context,
635      node,
636      lowest_scheduled,
637      allocate_processor
638    );
639
640    ( *insert_scheduled )( context, node );
641    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
642
[63e2ca1b]643    _Scheduler_Release_idle_thread(
[be0366b]644      context,
645      lowest_scheduled,
646      _Scheduler_SMP_Release_idle_thread
647    );
648  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
649    _Scheduler_SMP_Node_change_state(
[3a72411]650      lowest_scheduled,
[be0366b]651      SCHEDULER_SMP_NODE_READY
652    );
[3a72411]653    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
[be0366b]654
655    ( *insert_scheduled )( context, node );
656    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
657
658    _Scheduler_Exchange_idle_thread(
659      node,
660      lowest_scheduled,
661      _Scheduler_Node_get_idle( lowest_scheduled )
662    );
[5c3d250]663  } else {
[be0366b]664    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
[3a72411]665    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
[5c3d250]666  }
667}
668
[c6522a65]669/**
[8f0c7a46]670 * @brief Enqueues a node according to the specified order function.
[c6522a65]671 *
[8f0c7a46]672 * The node must not be in the scheduled state.
[c0bff5e]673 *
[c6522a65]674 * @param[in] context The scheduler instance context.
[8f0c7a46]675 * @param[in] node The node to enqueue.
[c6522a65]676 * @param[in] order The order function.
677 * @param[in] insert_ready Function to insert a node into the set of ready
[238629f]678 *   nodes.
[c6522a65]679 * @param[in] insert_scheduled Function to insert a node into the set of
[238629f]680 *   scheduled nodes.
[c6522a65]681 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
[238629f]682 *   of scheduled nodes to the set of ready nodes.
[8f0c7a46]683 * @param[in] get_lowest_scheduled Function to select the node from the
[82df6f3]684 *   scheduled nodes to replace.  It may not be possible to find one; in this
685 *   case a pointer must be returned so that the order function returns false
686 *   if this pointer is passed as the second argument to the order function.
[8f0c7a46]687 * @param[in] allocate_processor Function to allocate a processor to a node
[238629f]688 *   based on the rules of the scheduler.
[c6522a65]689 */
[63e2ca1b]690static inline bool _Scheduler_SMP_Enqueue_ordered(
[d9b54da]691  Scheduler_Context                  *context,
[8f0c7a46]692  Scheduler_Node                     *node,
[d9b54da]693  Chain_Node_order                    order,
[238629f]694  Scheduler_SMP_Insert                insert_ready,
695  Scheduler_SMP_Insert                insert_scheduled,
696  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
697  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
[27783f6]698  Scheduler_SMP_Allocate_processor    allocate_processor
[48c4a55]699)
[c0bff5e]700{
[63e2ca1b]701  bool            needs_help;
702  Scheduler_Node *lowest_scheduled;
703
704  lowest_scheduled = ( *get_lowest_scheduled )( context, node, order );
[c0bff5e]705
[8f0c7a46]706  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
[63e2ca1b]707    _Scheduler_SMP_Enqueue_to_scheduled(
[19e41767]708      context,
709      node,
710      lowest_scheduled,
[5c3d250]711      insert_scheduled,
712      move_from_scheduled_to_ready,
[27783f6]713      allocate_processor
[19e41767]714    );
[63e2ca1b]715    needs_help = false;
[c0bff5e]716  } else {
[8f0c7a46]717    ( *insert_ready )( context, node );
[63e2ca1b]718    needs_help = true;
[c0bff5e]719  }
[8568341]720
721  return needs_help;
[c0bff5e]722}
723
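/*
 * Usage sketch (illustrative only): a concrete SMP scheduler typically wraps
 * _Scheduler_SMP_Enqueue_ordered() in small static functions which fix the
 * order predicate and the ready-set callbacks; the Simple SMP scheduler is an
 * example of such an instantiation.  The _My_SMP_* names below are
 * hypothetical placeholders for scheduler-specific helpers.
 *
 *   static bool _My_SMP_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       node,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _My_SMP_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_SMP_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */
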
724/**
[8f0c7a46]725 * @brief Enqueues a scheduled node according to the specified order
[c0bff5e]726 * function.
727 *
728 * @param[in] context The scheduler instance context.
[8f0c7a46]729 * @param[in] node The node to enqueue.
[c0bff5e]730 * @param[in] order The order function.
[5c3d250]731 * @param[in] extract_from_ready Function to extract a node from the set of
732 *   ready nodes.
[c0bff5e]733 * @param[in] get_highest_ready Function to get the highest ready node.
734 * @param[in] insert_ready Function to insert a node into the set of ready
[238629f]735 *   nodes.
[c0bff5e]736 * @param[in] insert_scheduled Function to insert a node into the set of
[238629f]737 *   scheduled nodes.
[c0bff5e]738 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
[238629f]739 *   of ready nodes to the set of scheduled nodes.
[8f0c7a46]740 * @param[in] allocate_processor Function to allocate a processor to a node
[238629f]741 *   based on the rules of the scheduler.
[c0bff5e]742 */
[63e2ca1b]743static inline bool _Scheduler_SMP_Enqueue_scheduled_ordered(
[d9b54da]744  Scheduler_Context                *context,
[8f0c7a46]745  Scheduler_Node                   *node,
[238629f]746  Chain_Node_order                  order,
[5c3d250]747  Scheduler_SMP_Extract             extract_from_ready,
[238629f]748  Scheduler_SMP_Get_highest_ready   get_highest_ready,
749  Scheduler_SMP_Insert              insert_ready,
750  Scheduler_SMP_Insert              insert_scheduled,
751  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]752  Scheduler_SMP_Allocate_processor  allocate_processor
[c0bff5e]753)
[48c4a55]754{
[d057d653]755  while ( true ) {
756    Scheduler_Node                   *highest_ready;
757    Scheduler_Try_to_schedule_action  action;
[48c4a55]758
[d057d653]759    highest_ready = ( *get_highest_ready )( context, node );
[5c3d250]760
761    /*
762     * The node has been extracted from the scheduled chain.  We have to place
763     * it now on the scheduled or ready set.
764     */
[6771359f]765    if (
766      node->sticky_level > 0
767        && ( *order )( &node->Node, &highest_ready->Node )
768    ) {
[5c3d250]769      ( *insert_scheduled )( context, node );
[6771359f]770
771      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
772        Thread_Control   *owner;
773        ISR_lock_Context  lock_context;
774
775        owner = _Scheduler_Node_get_owner( node );
776        _Thread_Scheduler_acquire_critical( owner, &lock_context );
777
778        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
779          _Thread_Scheduler_cancel_need_for_help(
780            owner,
781            _Thread_Get_CPU( owner )
782          );
783          _Scheduler_Discard_idle_thread(
784            context,
785            owner,
786            node,
787            _Scheduler_SMP_Release_idle_thread
788          );
789          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
790        }
791
792        _Thread_Scheduler_release_critical( owner, &lock_context );
793      }
794
[63e2ca1b]795      return false;
[d057d653]796    }
[5c3d250]797
[d057d653]798    action = _Scheduler_Try_to_schedule_node(
799      context,
800      highest_ready,
801      _Scheduler_Node_get_idle( node ),
802      _Scheduler_SMP_Get_idle_thread
803    );
804
805    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
806      Thread_Control *idle;
[5c3d250]807
[63e2ca1b]808      _Scheduler_SMP_Preempt(
[5c3d250]809        context,
810        highest_ready,
[d057d653]811        node,
812        allocate_processor
[5c3d250]813      );
[c0bff5e]814
[d057d653]815      ( *insert_ready )( context, node );
816      ( *move_from_ready_to_scheduled )( context, highest_ready );
817
818      idle = _Scheduler_Release_idle_thread(
819        context,
820        node,
821        _Scheduler_SMP_Release_idle_thread
822      );
[63e2ca1b]823      return ( idle == NULL );
[d057d653]824    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
[3a72411]825      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
[d057d653]826      _Scheduler_SMP_Node_change_state(
[3a72411]827        highest_ready,
[d057d653]828        SCHEDULER_SMP_NODE_SCHEDULED
829      );
[19e41767]830
[d057d653]831      ( *insert_ready )( context, node );
832      ( *move_from_ready_to_scheduled )( context, highest_ready );
[8568341]833
[d057d653]834      _Scheduler_Exchange_idle_thread(
835        highest_ready,
836        node,
837        _Scheduler_Node_get_idle( node )
838      );
[63e2ca1b]839      return false;
[d057d653]840    } else {
841      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
[be0366b]842
[d057d653]843      _Scheduler_SMP_Node_change_state(
[3a72411]844        highest_ready,
[d057d653]845        SCHEDULER_SMP_NODE_BLOCKED
846      );
[8568341]847
[d057d653]848      ( *extract_from_ready )( context, highest_ready );
849    }
850  }
[48c4a55]851}
852
[c0bff5e]853static inline void _Scheduler_SMP_Extract_from_scheduled(
[8f0c7a46]854  Scheduler_Node *node
[c0bff5e]855)
[f39f667a]856{
[8f0c7a46]857  _Chain_Extract_unprotected( &node->Node );
[f39f667a]858}
859
[48c4a55]860static inline void _Scheduler_SMP_Schedule_highest_ready(
[d9b54da]861  Scheduler_Context                *context,
[8f0c7a46]862  Scheduler_Node                   *victim,
[edb020c]863  Per_CPU_Control                  *victim_cpu,
[5c3d250]864  Scheduler_SMP_Extract             extract_from_ready,
[d9b54da]865  Scheduler_SMP_Get_highest_ready   get_highest_ready,
866  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]867  Scheduler_SMP_Allocate_processor  allocate_processor
[48c4a55]868)
869{
[d057d653]870  Scheduler_Try_to_schedule_action action;
871
[be0366b]872  do {
[5c3d250]873    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
874
[be0366b]875    action = _Scheduler_Try_to_schedule_node(
876      context,
877      highest_ready,
878      NULL,
879      _Scheduler_SMP_Get_idle_thread
880    );
881
882    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
[5c3d250]883      _Scheduler_SMP_Allocate_processor(
884        context,
885        highest_ready,
[a7a8ec03]886        _Scheduler_Node_get_user( victim ),
[edb020c]887        victim_cpu,
[5c3d250]888        allocate_processor
889      );
[48c4a55]890
[5c3d250]891      ( *move_from_ready_to_scheduled )( context, highest_ready );
892    } else {
[be0366b]893      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
894
[5c3d250]895      _Scheduler_SMP_Node_change_state(
[3a72411]896        highest_ready,
[5c3d250]897        SCHEDULER_SMP_NODE_BLOCKED
898      );
[19e41767]899
[5c3d250]900      ( *extract_from_ready )( context, highest_ready );
901    }
[d057d653]902  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
[48c4a55]903}
904
[c6522a65]905/**
[f39f667a]906 * @brief Blocks a thread.
[c6522a65]907 *
908 * @param[in] context The scheduler instance context.
909 * @param[in] thread The thread of the scheduling operation.
[e382a1b]910 * @param[in] node The scheduler node of the thread to block.
[f39f667a]911 * @param[in] extract_from_ready Function to extract a node from the set of
[5c3d250]912 *   ready nodes.
[c6522a65]913 * @param[in] get_highest_ready Function to get the highest ready node.
914 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
[5c3d250]915 *   of ready nodes to the set of scheduled nodes.
[c6522a65]916 */
[f39f667a]917static inline void _Scheduler_SMP_Block(
[d9b54da]918  Scheduler_Context                *context,
919  Thread_Control                   *thread,
[e382a1b]920  Scheduler_Node                   *node,
[d9b54da]921  Scheduler_SMP_Extract             extract_from_ready,
922  Scheduler_SMP_Get_highest_ready   get_highest_ready,
923  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]924  Scheduler_SMP_Allocate_processor  allocate_processor
[48c4a55]925)
926{
[edb020c]927  Scheduler_SMP_Node_state  node_state;
928  Per_CPU_Control          *thread_cpu;
[cceb19f4]929
[e382a1b]930  node_state = _Scheduler_SMP_Node_state( node );
[cceb19f4]931
[edb020c]932  thread_cpu = _Scheduler_Block_node(
[5c3d250]933    context,
[cceb19f4]934    thread,
[e382a1b]935    node,
936    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
[27783f6]937    _Scheduler_SMP_Get_idle_thread
[5c3d250]938  );
[351c14d]939
[edb020c]940  if ( thread_cpu != NULL ) {
[3a72411]941    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
[beab7329]942
[e382a1b]943    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
944      _Scheduler_SMP_Extract_from_scheduled( node );
[5c3d250]945      _Scheduler_SMP_Schedule_highest_ready(
946        context,
[e382a1b]947        node,
[edb020c]948        thread_cpu,
[5c3d250]949        extract_from_ready,
950        get_highest_ready,
951        move_from_ready_to_scheduled,
[27783f6]952        allocate_processor
[5c3d250]953      );
[351c14d]954    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
[e382a1b]955      ( *extract_from_ready )( context, node );
[5c3d250]956    }
[48c4a55]957  }
958}
959
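/*
 * Usage sketch (illustrative only): a scheduler's block operation usually
 * forwards to _Scheduler_SMP_Block() with its ready-set callbacks.  The
 * _My_SMP_* names are hypothetical placeholders.
 *
 *   void _My_SMP_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *     _Scheduler_SMP_Block(
 *       context,
 *       thread,
 *       node,
 *       _My_SMP_Extract_from_ready,
 *       _My_SMP_Get_highest_ready,
 *       _My_SMP_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */
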
[63e2ca1b]960static inline bool _Scheduler_SMP_Unblock(
[9bfad8c]961  Scheduler_Context     *context,
962  Thread_Control        *thread,
[72e0bdb]963  Scheduler_Node        *node,
[9bfad8c]964  Scheduler_SMP_Update   update,
965  Scheduler_SMP_Enqueue  enqueue_fifo
[c0bff5e]966)
967{
[72e0bdb]968  Scheduler_SMP_Node_state  node_state;
969  bool                      unblock;
[63e2ca1b]970  bool                      needs_help;
[9bfad8c]971
[72e0bdb]972  node_state = _Scheduler_SMP_Node_state( node );
[9bfad8c]973  unblock = _Scheduler_Unblock_node(
[5c3d250]974    context,
975    thread,
[72e0bdb]976    node,
977    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
[27783f6]978    _Scheduler_SMP_Release_idle_thread
[5c3d250]979  );
[c0bff5e]980
[5c3d250]981  if ( unblock ) {
[9bfad8c]982    Priority_Control new_priority;
983    bool             prepend_it;
984
[72e0bdb]985    new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
[9bfad8c]986    (void) prepend_it;
987
[72e0bdb]988    if ( new_priority != _Scheduler_SMP_Node_priority( node ) ) {
989      ( *update )( context, node, new_priority );
[9bfad8c]990    }
991
[72e0bdb]992    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
[3a72411]993      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
[cceb19f4]994
[63e2ca1b]995      needs_help = ( *enqueue_fifo )( context, node );
[cceb19f4]996    } else {
[72e0bdb]997      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
[6771359f]998      _Assert( node->sticky_level > 0 );
[72e0bdb]999      _Assert( node->idle == NULL );
[63e2ca1b]1000      needs_help = true;
[cceb19f4]1001    }
[5c3d250]1002  } else {
[63e2ca1b]1003    needs_help = false;
[5c3d250]1004  }
1005
1006  return needs_help;
[c0bff5e]1007}
1008
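/*
 * Updates the priority of the node and re-enqueues it if necessary.  If the
 * priority is unchanged, at most an ask-for-help request is issued.  Otherwise
 * a scheduled or ready node is extracted, updated, and enqueued again,
 * prepending or appending among nodes of equal priority as requested by the
 * new priority; a blocked node is only updated.
 */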
[9c238e1]1009static inline void _Scheduler_SMP_Update_priority(
[63e2ca1b]1010  Scheduler_Context          *context,
1011  Thread_Control             *thread,
1012  Scheduler_Node             *node,
1013  Scheduler_SMP_Extract       extract_from_ready,
1014  Scheduler_SMP_Update        update,
1015  Scheduler_SMP_Enqueue       enqueue_fifo,
1016  Scheduler_SMP_Enqueue       enqueue_lifo,
1017  Scheduler_SMP_Enqueue       enqueue_scheduled_fifo,
1018  Scheduler_SMP_Enqueue       enqueue_scheduled_lifo,
1019  Scheduler_SMP_Ask_for_help  ask_for_help
[48c4a55]1020)
1021{
[501043a]1022  Priority_Control         new_priority;
1023  bool                     prepend_it;
1024  Scheduler_SMP_Node_state node_state;
[9bfad8c]1025
[501043a]1026  new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
[9bfad8c]1027
[501043a]1028  if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
[351c14d]1029    if ( _Thread_Is_ready( thread ) ) {
1030      ( *ask_for_help )( context, thread, node );
1031    }
1032
[9c238e1]1033    return;
[9bfad8c]1034  }
[a336d51]1035
[501043a]1036  node_state = _Scheduler_SMP_Node_state( node );
1037
1038  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1039    _Scheduler_SMP_Extract_from_scheduled( node );
[c0bff5e]1040
[501043a]1041    ( *update )( context, node, new_priority );
[c0bff5e]1042
1043    if ( prepend_it ) {
[9c238e1]1044      ( *enqueue_scheduled_lifo )( context, node );
[c0bff5e]1045    } else {
[9c238e1]1046      ( *enqueue_scheduled_fifo )( context, node );
[c0bff5e]1047    }
[501043a]1048  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1049    ( *extract_from_ready )( context, node );
[48c4a55]1050
[501043a]1051    ( *update )( context, node, new_priority );
[f39f667a]1052
[c0bff5e]1053    if ( prepend_it ) {
[63e2ca1b]1054      ( *enqueue_lifo )( context, node );
[c0bff5e]1055    } else {
[63e2ca1b]1056      ( *enqueue_fifo )( context, node );
[c0bff5e]1057    }
[5c3d250]1058  } else {
[501043a]1059    ( *update )( context, node, new_priority );
[5c3d250]1060
[351c14d]1061    if ( _Thread_Is_ready( thread ) ) {
1062      ( *ask_for_help )( context, thread, node );
1063    }
[f39f667a]1064  }
[48c4a55]1065}
1066
[63e2ca1b]1067static inline bool _Scheduler_SMP_Yield(
1068  Scheduler_Context     *context,
1069  Thread_Control        *thread,
1070  Scheduler_Node        *node,
1071  Scheduler_SMP_Extract  extract_from_ready,
1072  Scheduler_SMP_Enqueue  enqueue_fifo,
1073  Scheduler_SMP_Enqueue  enqueue_scheduled_fifo
[701dd96f]1074)
1075{
[63e2ca1b]1076  bool                     needs_help;
1077  Scheduler_SMP_Node_state node_state;
[6a82f1ae]1078
1079  node_state = _Scheduler_SMP_Node_state( node );
[701dd96f]1080
[6a82f1ae]1081  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
[2df4abc]1082    _Scheduler_SMP_Extract_from_scheduled( node );
[701dd96f]1083
[2df4abc]1084    needs_help = ( *enqueue_scheduled_fifo )( context, node );
[6a82f1ae]1085  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
[2df4abc]1086    ( *extract_from_ready )( context, node );
[701dd96f]1087
[63e2ca1b]1088    needs_help = ( *enqueue_fifo )( context, node );
[6a82f1ae]1089  } else {
[63e2ca1b]1090    needs_help = true;
[701dd96f]1091  }
[8568341]1092
1093  return needs_help;
[701dd96f]1094}
1095
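/*
 * Usage sketch (illustrative only): the enqueue callbacks passed to
 * _Scheduler_SMP_Yield() and _Scheduler_SMP_Unblock() are typically the
 * scheduler-specific wrappers built around _Scheduler_SMP_Enqueue_ordered()
 * and _Scheduler_SMP_Enqueue_scheduled_ordered(); see the sketch after
 * _Scheduler_SMP_Enqueue_ordered() above.  The _My_SMP_* names are
 * hypothetical placeholders.
 *
 *   void _My_SMP_Yield(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *     (void) _Scheduler_SMP_Yield(
 *       context,
 *       thread,
 *       node,
 *       _My_SMP_Extract_from_ready,
 *       _My_SMP_Enqueue_fifo,
 *       _My_SMP_Enqueue_scheduled_fifo
 *     );
 *   }
 */
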
[48c4a55]1096static inline void _Scheduler_SMP_Insert_scheduled_lifo(
[3730a07f]1097  Scheduler_Context *context,
[8f0c7a46]1098  Scheduler_Node    *node_to_insert
[48c4a55]1099)
1100{
[3730a07f]1101  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1102
[48c4a55]1103  _Chain_Insert_ordered_unprotected(
[494c2e3]1104    &self->Scheduled,
[8f0c7a46]1105    &node_to_insert->Node,
1106    _Scheduler_SMP_Insert_priority_lifo_order
[48c4a55]1107  );
1108}
1109
1110static inline void _Scheduler_SMP_Insert_scheduled_fifo(
[3730a07f]1111  Scheduler_Context *context,
[8f0c7a46]1112  Scheduler_Node    *node_to_insert
[48c4a55]1113)
1114{
[3730a07f]1115  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1116
[48c4a55]1117  _Chain_Insert_ordered_unprotected(
[494c2e3]1118    &self->Scheduled,
[8f0c7a46]1119    &node_to_insert->Node,
1120    _Scheduler_SMP_Insert_priority_fifo_order
[48c4a55]1121  );
1122}
1123
[351c14d]1124static inline bool _Scheduler_SMP_Ask_for_help(
1125  Scheduler_Context                  *context,
1126  Thread_Control                     *thread,
1127  Scheduler_Node                     *node,
1128  Chain_Node_order                    order,
1129  Scheduler_SMP_Insert                insert_ready,
1130  Scheduler_SMP_Insert                insert_scheduled,
1131  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
1132  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
1133  Scheduler_SMP_Allocate_processor    allocate_processor
1134)
1135{
1136  Scheduler_Node   *lowest_scheduled;
1137  ISR_lock_Context  lock_context;
1138  bool              success;
1139
1140  lowest_scheduled = ( *get_lowest_scheduled )( context, node, order );
1141
1142  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1143
[6771359f]1144  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
1145    Scheduler_SMP_Node_state node_state;
1146
1147    node_state = _Scheduler_SMP_Node_state( node );
1148
1149    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1150      if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
1151        _Thread_Scheduler_cancel_need_for_help(
1152          thread,
1153          _Thread_Get_CPU( thread )
1154        );
1155        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1156        _Thread_Scheduler_release_critical( thread, &lock_context );
1157
1158        _Scheduler_SMP_Preempt(
1159          context,
1160          node,
1161          lowest_scheduled,
1162          allocate_processor
1163        );
1164
1165        ( *insert_scheduled )( context, node );
1166        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
1167
1168        _Scheduler_Release_idle_thread(
1169          context,
1170          lowest_scheduled,
1171          _Scheduler_SMP_Release_idle_thread
1172        );
1173        success = true;
1174      } else {
1175        _Thread_Scheduler_release_critical( thread, &lock_context );
1176        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
1177        ( *insert_ready )( context, node );
1178        success = false;
1179      }
1180    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
[351c14d]1181      _Thread_Scheduler_cancel_need_for_help(
1182        thread,
1183        _Thread_Get_CPU( thread )
1184      );
[6771359f]1185      _Scheduler_Discard_idle_thread(
[351c14d]1186        context,
[6771359f]1187        thread,
[351c14d]1188        node,
1189        _Scheduler_SMP_Release_idle_thread
1190      );
[6771359f]1191      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1192      _Thread_Scheduler_release_critical( thread, &lock_context );
[351c14d]1193      success = true;
1194    } else {
1195      _Thread_Scheduler_release_critical( thread, &lock_context );
1196      success = false;
1197    }
1198  } else {
1199    _Thread_Scheduler_release_critical( thread, &lock_context );
1200    success = false;
1201  }
1202
1203  return success;
1204}
1205
1206static inline void _Scheduler_SMP_Reconsider_help_request(
1207  Scheduler_Context     *context,
1208  Thread_Control        *thread,
1209  Scheduler_Node        *node,
1210  Scheduler_SMP_Extract  extract_from_ready
1211)
1212{
1213  ISR_lock_Context lock_context;
1214
1215  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1216
1217  if (
1218    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
1219      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
[6771359f]1220      && node->sticky_level == 1
[351c14d]1221  ) {
1222    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1223    ( *extract_from_ready )( context, node );
1224  }
1225
1226  _Thread_Scheduler_release_critical( thread, &lock_context );
1227}
1228
1229static inline void _Scheduler_SMP_Withdraw_node(
1230  Scheduler_Context                *context,
1231  Thread_Control                   *thread,
1232  Scheduler_Node                   *node,
1233  Thread_Scheduler_state            next_state,
1234  Scheduler_SMP_Extract             extract_from_ready,
1235  Scheduler_SMP_Get_highest_ready   get_highest_ready,
1236  Scheduler_SMP_Move                move_from_ready_to_scheduled,
1237  Scheduler_SMP_Allocate_processor  allocate_processor
1238)
1239{
1240  ISR_lock_Context         lock_context;
1241  Scheduler_SMP_Node_state node_state;
1242
1243  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1244
1245  node_state = _Scheduler_SMP_Node_state( node );
1246  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1247
1248  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1249    Per_CPU_Control *thread_cpu;
1250
1251    thread_cpu = _Thread_Get_CPU( thread );
1252    _Scheduler_Thread_change_state( thread, next_state );
1253    _Thread_Scheduler_release_critical( thread, &lock_context );
1254
1255    _Scheduler_SMP_Extract_from_scheduled( node );
1256    _Scheduler_SMP_Schedule_highest_ready(
1257      context,
1258      node,
1259      thread_cpu,
1260      extract_from_ready,
1261      get_highest_ready,
1262      move_from_ready_to_scheduled,
1263      allocate_processor
1264    );
1265  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1266    _Thread_Scheduler_release_critical( thread, &lock_context );
1267    ( *extract_from_ready )( context, node );
1268  } else {
1269    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
1270    _Thread_Scheduler_release_critical( thread, &lock_context );
1271  }
1272}
1273
[9d83f58a]1274/** @} */
1275
1276#ifdef __cplusplus
1277}
1278#endif /* __cplusplus */
1279
1280#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */