source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ edb020c

[9d83f58a]1/**
2 * @file
3 *
4 * @brief SMP Scheduler Implementation
5 *
6 * @ingroup ScoreSchedulerSMP
7 */
8
9/*
[be0366b]10 * Copyright (c) 2013-2015 embedded brains GmbH.  All rights reserved.
[9d83f58a]11 *
12 *  embedded brains GmbH
13 *  Dornierstr. 4
14 *  82178 Puchheim
15 *  Germany
16 *  <rtems@embedded-brains.de>
17 *
18 * The license and distribution terms for this file may be
19 * found in the file LICENSE in this distribution or at
[c499856]20 * http://www.rtems.org/license/LICENSE.
[9d83f58a]21 */
22
23#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25
26#include <rtems/score/schedulersmp.h>
[38b59a6]27#include <rtems/score/assert.h>
[48c4a55]28#include <rtems/score/chainimpl.h>
[38b59a6]29#include <rtems/score/schedulersimpleimpl.h>
[9d83f58a]30
31#ifdef __cplusplus
32extern "C" {
33#endif /* __cplusplus */
34
35/**
36 * @addtogroup ScoreSchedulerSMP
37 *
[c6522a65]38 * The scheduler nodes can be in three states
39 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
[f39f667a]40 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
41 * - @ref SCHEDULER_SMP_NODE_READY.
[c6522a65]42 *
[f39f667a]43 * State transitions are triggered via basic operations
[c0bff5e]44 * - _Scheduler_SMP_Enqueue_ordered(),
45 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
[f39f667a]46 * - _Scheduler_SMP_Block().
[c6522a65]47 *
48 * @dot
49 * digraph {
50 *   node [style="filled"];
51 *
52 *   bs [label="BLOCKED"];
53 *   ss [label="SCHEDULED", fillcolor="green"];
54 *   rs [label="READY", fillcolor="red"];
55 *
56 *   edge [label="enqueue"];
57 *   edge [fontcolor="darkgreen", color="darkgreen"];
58 *
59 *   bs -> ss;
60 *
61 *   edge [fontcolor="red", color="red"];
62 *
63 *   bs -> rs;
64 *
65 *   edge [label="enqueue other"];
66 *
67 *   ss -> rs;
68 *
[f39f667a]69 *   edge [label="block"];
[c6522a65]70 *   edge [fontcolor="black", color="black"];
71 *
[b532bb2c]72 *   ss -> bs;
[c6522a65]73 *   rs -> bs;
74 *
[f39f667a]75 *   edge [label="block other"];
[c6522a65]76 *   edge [fontcolor="darkgreen", color="darkgreen"];
77 *
78 *   rs -> ss;
79 * }
80 * @enddot
81 *
82 * During system initialization each processor of the scheduler instance starts
83 * with an idle thread assigned to it.  Let's have a look at an example with two
84 * idle threads I and J with priority 5.  We also have blocked threads A, B and
[2d96533]85 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
86 * with respect to the thread priority from left to right in the diagrams
87 * below.  The highest priority node (lowest priority number) is the
88 * leftmost node.  Since the processor assignment is independent of the thread
89 * priority, the processor indices may change from one diagram to the next.
[c6522a65]90 *
91 * @dot
92 * digraph {
93 *   node [style="filled"];
94 *   edge [dir="none"];
95 *   subgraph {
96 *     rank = same;
97 *
98 *     i [label="I (5)", fillcolor="green"];
99 *     j [label="J (5)", fillcolor="green"];
100 *     a [label="A (1)"];
101 *     b [label="B (2)"];
102 *     c [label="C (3)"];
103 *     i -> j;
104 *   }
105 *
106 *   subgraph {
107 *     rank = same;
108 *
109 *     p0 [label="PROCESSOR 0", shape="box"];
110 *     p1 [label="PROCESSOR 1", shape="box"];
111 *   }
112 *
113 *   i -> p0;
114 *   j -> p1;
115 * }
116 * @enddot
117 *
118 * Let's start A.  For this, an enqueue operation is performed.
119 *
120 * @dot
121 * digraph {
122 *   node [style="filled"];
123 *   edge [dir="none"];
124 *
125 *   subgraph {
126 *     rank = same;
127 *
128 *     i [label="I (5)", fillcolor="green"];
129 *     j [label="J (5)", fillcolor="red"];
130 *     a [label="A (1)", fillcolor="green"];
131 *     b [label="B (2)"];
132 *     c [label="C (3)"];
133 *     a -> i;
134 *   }
135 *
136 *   subgraph {
137 *     rank = same;
138 *
139 *     p0 [label="PROCESSOR 0", shape="box"];
140 *     p1 [label="PROCESSOR 1", shape="box"];
141 *   }
142 *
143 *   i -> p0;
144 *   a -> p1;
145 * }
146 * @enddot
147 *
148 * Let's start C.
149 *
150 * @dot
151 * digraph {
152 *   node [style="filled"];
153 *   edge [dir="none"];
154 *
155 *   subgraph {
156 *     rank = same;
157 *
158 *     a [label="A (1)", fillcolor="green"];
159 *     c [label="C (3)", fillcolor="green"];
160 *     i [label="I (5)", fillcolor="red"];
161 *     j [label="J (5)", fillcolor="red"];
162 *     b [label="B (2)"];
163 *     a -> c;
164 *     i -> j;
165 *   }
166 *
167 *   subgraph {
168 *     rank = same;
169 *
170 *     p0 [label="PROCESSOR 0", shape="box"];
171 *     p1 [label="PROCESSOR 1", shape="box"];
172 *   }
173 *
174 *   c -> p0;
175 *   a -> p1;
176 * }
177 * @enddot
178 *
179 * Let's start B.
180 *
181 * @dot
182 * digraph {
183 *   node [style="filled"];
184 *   edge [dir="none"];
185 *
186 *   subgraph {
187 *     rank = same;
188 *
189 *     a [label="A (1)", fillcolor="green"];
190 *     b [label="B (2)", fillcolor="green"];
191 *     c [label="C (3)", fillcolor="red"];
192 *     i [label="I (5)", fillcolor="red"];
193 *     j [label="J (5)", fillcolor="red"];
194 *     a -> b;
195 *     c -> i -> j;
196 *   }
197 *
198 *   subgraph {
199 *     rank = same;
200 *
201 *     p0 [label="PROCESSOR 0", shape="box"];
202 *     p1 [label="PROCESSOR 1", shape="box"];
203 *   }
204 *
205 *   b -> p0;
206 *   a -> p1;
207 * }
208 * @enddot
209 *
[f39f667a]210 * Let's change the priority of thread A to 4.
[c6522a65]211 *
212 * @dot
213 * digraph {
214 *   node [style="filled"];
215 *   edge [dir="none"];
216 *
217 *   subgraph {
218 *     rank = same;
219 *
220 *     b [label="B (2)", fillcolor="green"];
221 *     c [label="C (3)", fillcolor="green"];
222 *     a [label="A (4)", fillcolor="red"];
223 *     i [label="I (5)", fillcolor="red"];
224 *     j [label="J (5)", fillcolor="red"];
225 *     b -> c;
226 *     a -> i -> j;
227 *   }
228 *
229 *   subgraph {
230 *     rank = same;
231 *
232 *     p0 [label="PROCESSOR 0", shape="box"];
233 *     p1 [label="PROCESSOR 1", shape="box"];
234 *   }
235 *
236 *   b -> p0;
237 *   c -> p1;
238 * }
239 * @enddot
240 *
[f39f667a]241 * Now perform a blocking operation with thread B.  Please note that thread A
242 * now migrated from processor 1 to processor 0 and thread C still executes on
243 * processor 1.
[c6522a65]244 *
245 * @dot
246 * digraph {
247 *   node [style="filled"];
248 *   edge [dir="none"];
249 *
250 *   subgraph {
251 *     rank = same;
252 *
253 *     c [label="C (3)", fillcolor="green"];
[f39f667a]254 *     a [label="A (4)", fillcolor="green"];
[c6522a65]255 *     i [label="I (5)", fillcolor="red"];
256 *     j [label="J (5)", fillcolor="red"];
[f39f667a]257 *     b [label="B (2)"];
258 *     c -> a;
[c6522a65]259 *     i -> j;
260 *   }
261 *
262 *   subgraph {
263 *     rank = same;
264 *
265 *     p0 [label="PROCESSOR 0", shape="box"];
266 *     p1 [label="PROCESSOR 1", shape="box"];
267 *   }
268 *
[f39f667a]269 *   a -> p0;
[c6522a65]270 *   c -> p1;
271 * }
272 * @enddot
273 *
[9d83f58a]274 * @{
275 */
276
[8f0c7a46]277typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
[238629f]278  Scheduler_Context *context,
[8f0c7a46]279  Scheduler_Node    *node
[238629f]280);
281
[8f0c7a46]282typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
[238629f]283  Scheduler_Context *context,
[8f0c7a46]284  Scheduler_Node    *filter,
[238629f]285  Chain_Node_order   order
[48c4a55]286);
287
288typedef void ( *Scheduler_SMP_Extract )(
[3730a07f]289  Scheduler_Context *context,
[8f0c7a46]290  Scheduler_Node    *node_to_extract
[48c4a55]291);
292
293typedef void ( *Scheduler_SMP_Insert )(
[3730a07f]294  Scheduler_Context *context,
[8f0c7a46]295  Scheduler_Node    *node_to_insert
[48c4a55]296);
297
298typedef void ( *Scheduler_SMP_Move )(
[3730a07f]299  Scheduler_Context *context,
[8f0c7a46]300  Scheduler_Node    *node_to_move
[48c4a55]301);
302
[f39f667a]303typedef void ( *Scheduler_SMP_Update )(
304  Scheduler_Context *context,
[8f0c7a46]305  Scheduler_Node    *node_to_update,
[d9b54da]306  Priority_Control   new_priority
[f39f667a]307);
308
[8568341]309typedef Thread_Control *( *Scheduler_SMP_Enqueue )(
310  Scheduler_Context *context,
311  Scheduler_Node    *node_to_enqueue,
312  Thread_Control    *needs_help
313);
314
315typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
[f39f667a]316  Scheduler_Context *context,
[8f0c7a46]317  Scheduler_Node    *node_to_enqueue
[f39f667a]318);
319
[238629f]320typedef void ( *Scheduler_SMP_Allocate_processor )(
[8f0c7a46]321  Scheduler_Context *context,
[edb020c]322  Thread_Control    *scheduled_thread,
323  Thread_Control    *victim_thread,
324  Per_CPU_Control   *victim_cpu
[238629f]325);
326
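/*
 * The function pointer types above form the interface which a concrete SMP
 * scheduler implementation supplies to the generic operations below.  A
 * minimal sketch of a callback matching the Scheduler_SMP_Extract signature
 * follows; it is illustrative only and the _Scheduler_hypothetical_* name is
 * an assumption, not part of RTEMS:
 *
 *   static void _Scheduler_hypothetical_Extract_from_ready(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node_to_extract
 *   )
 *   {
 *     (void) context;
 *
 *     _Chain_Extract_unprotected( &node_to_extract->Node );
 *   }
 */
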
[8f0c7a46]327static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
328  const Chain_Node *to_insert,
329  const Chain_Node *next
330)
331{
332  const Scheduler_SMP_Node *node_to_insert =
333    (const Scheduler_SMP_Node *) to_insert;
334  const Scheduler_SMP_Node *node_next =
335    (const Scheduler_SMP_Node *) next;
336
337  return node_to_insert->priority <= node_next->priority;
338}
339
340static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
341  const Chain_Node *to_insert,
342  const Chain_Node *next
343)
344{
345  const Scheduler_SMP_Node *node_to_insert =
346    (const Scheduler_SMP_Node *) to_insert;
347  const Scheduler_SMP_Node *node_next =
348    (const Scheduler_SMP_Node *) next;
349
350  return node_to_insert->priority < node_next->priority;
351}
352
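/*
 * The two order predicates above differ only in how they treat equal
 * priorities: the LIFO order (<=) lets a newly inserted node precede already
 * queued nodes of the same priority, while the FIFO order (<) keeps it behind
 * them.  Illustrative sketch only, with hypothetical priority values and the
 * chain node reached via Base.Node as elsewhere in this file:
 *
 *   Scheduler_SMP_Node queued;
 *   Scheduler_SMP_Node inserted;
 *
 *   queued.priority = 3;
 *   inserted.priority = 3;
 *
 *   _Scheduler_SMP_Insert_priority_lifo_order(
 *     &inserted.Base.Node,
 *     &queued.Base.Node
 *   );
 *
 *   _Scheduler_SMP_Insert_priority_fifo_order(
 *     &inserted.Base.Node,
 *     &queued.Base.Node
 *   );
 *
 * With these values the LIFO predicate returns true (insert in front of the
 * queued node), while the FIFO predicate returns false (the new node ends up
 * behind all nodes of equal priority).
 */
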
[3730a07f]353static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
354  Scheduler_Context *context
355)
356{
357  return (Scheduler_SMP_Context *) context;
358}
359
[494c2e3]360static inline void _Scheduler_SMP_Initialize(
[e1598a6]361  Scheduler_SMP_Context *self
[494c2e3]362)
[9d83f58a]363{
[494c2e3]364  _Chain_Initialize_empty( &self->Scheduled );
[5c3d250]365  _Chain_Initialize_empty( &self->Idle_threads );
[9d83f58a]366}
367
[08d9760]368static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
[beab7329]369  Thread_Control *thread
370)
371{
[08d9760]372  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
[beab7329]373}
374
[5c3d250]375static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
376  Thread_Control *thread
377)
378{
[300f6a48]379  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_own_node( thread );
[5c3d250]380}
381
[8f0c7a46]382static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
383  Scheduler_Node *node
384)
385{
386  return (Scheduler_SMP_Node *) node;
387}
388
[501043a]389static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
390  const Scheduler_Node *node
391)
392{
393  return ( (const Scheduler_SMP_Node *) node )->state;
394}
395
396static inline Priority_Control _Scheduler_SMP_Node_priority(
397  const Scheduler_Node *node
398)
399{
400  return ( (const Scheduler_SMP_Node *) node )->priority;
401}
402
[beab7329]403static inline void _Scheduler_SMP_Node_initialize(
[300f6a48]404  const Scheduler_Control *scheduler,
405  Scheduler_SMP_Node      *node,
406  Thread_Control          *thread,
407  Priority_Control         priority
[beab7329]408)
409{
[300f6a48]410  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
[beab7329]411  node->state = SCHEDULER_SMP_NODE_BLOCKED;
[9bfad8c]412  node->priority = priority;
[beab7329]413}
414
[8f0c7a46]415static inline void _Scheduler_SMP_Node_update_priority(
416  Scheduler_SMP_Node *node,
417  Priority_Control    new_priority
418)
419{
420  node->priority = new_priority;
421}
422
[f39f667a]423extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];
[beab7329]424
425static inline void _Scheduler_SMP_Node_change_state(
[3a72411]426  Scheduler_Node           *node,
427  Scheduler_SMP_Node_state  new_state
[beab7329]428)
429{
[3a72411]430  Scheduler_SMP_Node *the_node;
431
432  the_node = _Scheduler_SMP_Node_downcast( node );
[beab7329]433  _Assert(
[3a72411]434    _Scheduler_SMP_Node_valid_state_changes[ the_node->state ][ new_state ]
[beab7329]435  );
436
[3a72411]437  the_node->state = new_state;
[beab7329]438}
439
[38b59a6]440static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
[8f0c7a46]441  const Scheduler_Context *context,
442  const Per_CPU_Control   *cpu
[38b59a6]443)
444{
[8f0c7a46]445  return cpu->scheduler_context == context;
[38b59a6]446}
447
[5c3d250]448static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
[27783f6]449  Scheduler_Context *context
[5c3d250]450)
451{
452  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
453  Thread_Control *idle = (Thread_Control *)
454    _Chain_Get_first_unprotected( &self->Idle_threads );
455
456  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
457
458  return idle;
459}
460
461static inline void _Scheduler_SMP_Release_idle_thread(
[27783f6]462  Scheduler_Context *context,
463  Thread_Control    *idle
[5c3d250]464)
465{
466  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
467
468  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
469}
470
[19e41767]471static inline void _Scheduler_SMP_Allocate_processor_lazy(
[8f0c7a46]472  Scheduler_Context *context,
[19e41767]473  Thread_Control    *scheduled_thread,
[edb020c]474  Thread_Control    *victim_thread,
475  Per_CPU_Control   *victim_cpu
[fc2ad63]476)
477{
[8f0c7a46]478  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
[38b59a6]479  Per_CPU_Control *cpu_self = _Per_CPU_Get();
[fc2ad63]480  Thread_Control *heir;
481
[38b59a6]482  _Assert( _ISR_Get_level() != 0 );
[fc2ad63]483
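  /*
   * If the thread to be scheduled already executes on a processor owned by
   * this scheduler instance, keep it there and let the previous heir of that
   * processor take over the victim's processor.  Otherwise the scheduled
   * thread itself becomes the heir of the victim's processor.
   */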
[8f0c7a46]484  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
485    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
486      heir = scheduled_cpu->heir;
[835b88b]487      _Thread_Dispatch_update_heir(
[8f0c7a46]488        cpu_self,
489        scheduled_cpu,
490        scheduled_thread
491      );
[38b59a6]492    } else {
493      /* We have to force a migration to our processor set */
[8f0c7a46]494      heir = scheduled_thread;
[38b59a6]495    }
[fc2ad63]496  } else {
[8f0c7a46]497    heir = scheduled_thread;
[fc2ad63]498  }
499
[8f0c7a46]500  if ( heir != victim_thread ) {
501    _Thread_Set_CPU( heir, victim_cpu );
[835b88b]502    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
[fc2ad63]503  }
504}
505
[09c87fb]506/*
507 * This method is slightly different from
508 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
509 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
510 * but does not take into account affinity.
511 */
512static inline void _Scheduler_SMP_Allocate_processor_exact(
513  Scheduler_Context *context,
514  Thread_Control    *scheduled_thread,
[edb020c]515  Thread_Control    *victim_thread,
516  Per_CPU_Control   *victim_cpu
[09c87fb]517)
518{
519  Per_CPU_Control *cpu_self = _Per_CPU_Get();
520
521  (void) context;
522
523  _Thread_Set_CPU( scheduled_thread, victim_cpu );
524  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
525}
526
[19e41767]527static inline void _Scheduler_SMP_Allocate_processor(
528  Scheduler_Context                *context,
529  Scheduler_Node                   *scheduled,
[a7a8ec03]530  Thread_Control                   *victim_thread,
[edb020c]531  Per_CPU_Control                  *victim_cpu,
[19e41767]532  Scheduler_SMP_Allocate_processor  allocate_processor
533)
534{
[ac532f3]535  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
[19e41767]536
[3a72411]537  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
[19e41767]538
[edb020c]539  ( *allocate_processor )(
540    context,
541    scheduled_thread,
542    victim_thread,
543    victim_cpu
544  );
[19e41767]545}
546
[a7a8ec03]547static inline Thread_Control *_Scheduler_SMP_Preempt(
548  Scheduler_Context                *context,
549  Scheduler_Node                   *scheduled,
550  Scheduler_Node                   *victim,
551  Scheduler_SMP_Allocate_processor  allocate_processor
552)
553{
554  Thread_Control   *victim_thread;
555  ISR_lock_Context  lock_context;
[edb020c]556  Per_CPU_Control  *victim_cpu;
[a7a8ec03]557
558  victim_thread = _Scheduler_Node_get_user( victim );
559  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
560
561  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
[edb020c]562  victim_cpu = _Thread_Get_CPU( victim_thread );
[a7a8ec03]563  _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
564  _Thread_Scheduler_release_critical( victim_thread, &lock_context );
565
566  _Scheduler_SMP_Allocate_processor(
567    context,
568    scheduled,
569    victim_thread,
[edb020c]570    victim_cpu,
[a7a8ec03]571    allocate_processor
572  );
573
574  return victim_thread;
575}
576
[8f0c7a46]577static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
[238629f]578  Scheduler_Context *context,
[8f0c7a46]579  Scheduler_Node    *filter,
[238629f]580  Chain_Node_order   order
[aea4a91]581)
582{
[238629f]583  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
[494c2e3]584  Chain_Control *scheduled = &self->Scheduled;
[8f0c7a46]585  Scheduler_Node *lowest_scheduled =
586    (Scheduler_Node *) _Chain_Last( scheduled );
[aea4a91]587
[8f0c7a46]588  (void) filter;
589  (void) order;
[aea4a91]590
[5c632c4]591  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );
[238629f]592
[8f0c7a46]593  return lowest_scheduled;
[aea4a91]594}
595
[5c3d250]596static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
597  Scheduler_Context                *context,
598  Scheduler_Node                   *node,
599  Scheduler_Node                   *lowest_scheduled,
600  Scheduler_SMP_Insert              insert_scheduled,
601  Scheduler_SMP_Move                move_from_scheduled_to_ready,
[27783f6]602  Scheduler_SMP_Allocate_processor  allocate_processor
[5c3d250]603)
604{
605  Thread_Control *needs_help;
[be0366b]606  Scheduler_Try_to_schedule_action action;
[5c3d250]607
[be0366b]608  action = _Scheduler_Try_to_schedule_node(
[5c3d250]609    context,
610    node,
[be0366b]611    _Scheduler_Node_get_idle( lowest_scheduled ),
612    _Scheduler_SMP_Get_idle_thread
[5c3d250]613  );
614
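  /*
   * Depending on the outcome, the node preempts the lowest scheduled node
   * (DO_SCHEDULE), takes its place in the scheduled set via an idle thread
   * exchange (DO_IDLE_EXCHANGE), or is blocked (DO_BLOCK).
   */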
[be0366b]615  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
[a7a8ec03]616    Thread_Control *lowest_scheduled_user;
[be0366b]617    Thread_Control *idle;
[5c3d250]618
[a7a8ec03]619    lowest_scheduled_user = _Scheduler_SMP_Preempt(
[be0366b]620      context,
621      node,
622      lowest_scheduled,
623      allocate_processor
624    );
625
626    ( *insert_scheduled )( context, node );
627    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
628
629    idle = _Scheduler_Release_idle_thread(
630      context,
631      lowest_scheduled,
632      _Scheduler_SMP_Release_idle_thread
633    );
634    if ( idle == NULL ) {
635      needs_help = lowest_scheduled_user;
636    } else {
637      needs_help = NULL;
638    }
639  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
640    _Scheduler_SMP_Node_change_state(
[3a72411]641      lowest_scheduled,
[be0366b]642      SCHEDULER_SMP_NODE_READY
643    );
[3a72411]644    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
[be0366b]645
646    ( *insert_scheduled )( context, node );
647    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
648
649    _Scheduler_Exchange_idle_thread(
650      node,
651      lowest_scheduled,
652      _Scheduler_Node_get_idle( lowest_scheduled )
653    );
654
655    needs_help = NULL;
[5c3d250]656  } else {
[be0366b]657    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
[3a72411]658    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
[5c3d250]659    needs_help = NULL;
660  }
661
662  return needs_help;
663}
664
[c6522a65]665/**
[8f0c7a46]666 * @brief Enqueues a node according to the specified order function.
[c6522a65]667 *
[8f0c7a46]668 * The node must not be in the scheduled state.
[c0bff5e]669 *
[c6522a65]670 * @param[in] context The scheduler instance context.
[8f0c7a46]671 * @param[in] node The node to enqueue.
[8568341]672 * @param[in] needs_help The thread needing help in case the node cannot be
673 *   scheduled.
[c6522a65]674 * @param[in] order The order function.
675 * @param[in] insert_ready Function to insert a node into the set of ready
[238629f]676 *   nodes.
[c6522a65]677 * @param[in] insert_scheduled Function to insert a node into the set of
[238629f]678 *   scheduled nodes.
[c6522a65]679 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
[238629f]680 *   of scheduled nodes to the set of ready nodes.
[8f0c7a46]681 * @param[in] get_lowest_scheduled Function to select the node from the
[82df6f3]682 *   scheduled nodes to replace.  It may not be possible to find one; in this
683 *   case a pointer must be returned so that the order function returns false
684 *   if this pointer is passed as the second argument to the order function.
[8f0c7a46]685 * @param[in] allocate_processor Function to allocate a processor to a node
[238629f]686 *   based on the rules of the scheduler.
[c6522a65]687 */
[8568341]688static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
[d9b54da]689  Scheduler_Context                  *context,
[8f0c7a46]690  Scheduler_Node                     *node,
[8568341]691  Thread_Control                     *needs_help,
[d9b54da]692  Chain_Node_order                    order,
[238629f]693  Scheduler_SMP_Insert                insert_ready,
694  Scheduler_SMP_Insert                insert_scheduled,
695  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
696  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
[27783f6]697  Scheduler_SMP_Allocate_processor    allocate_processor
[48c4a55]698)
[c0bff5e]699{
[8f0c7a46]700  Scheduler_Node *lowest_scheduled =
701    ( *get_lowest_scheduled )( context, node, order );
[c0bff5e]702
[8f0c7a46]703  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
[5c3d250]704    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
[19e41767]705      context,
706      node,
707      lowest_scheduled,
[5c3d250]708      insert_scheduled,
709      move_from_scheduled_to_ready,
[27783f6]710      allocate_processor
[19e41767]711    );
[c0bff5e]712  } else {
[8f0c7a46]713    ( *insert_ready )( context, node );
[c0bff5e]714  }
[8568341]715
716  return needs_help;
[c0bff5e]717}
718
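/*
 * A concrete SMP scheduler typically wraps _Scheduler_SMP_Enqueue_ordered()
 * in small enqueue operations which pass its own ready set callbacks.  The
 * sketch below is illustrative only; the _Scheduler_hypothetical_* helpers
 * are assumed to exist and to match the callback types defined above:
 *
 *   static Thread_Control *_Scheduler_hypothetical_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Thread_Control    *needs_help
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       node,
 *       needs_help,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _Scheduler_hypothetical_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _Scheduler_hypothetical_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 *
 * Such a wrapper matches the Scheduler_SMP_Enqueue type and can in turn be
 * passed as enqueue_fifo to _Scheduler_SMP_Unblock() or
 * _Scheduler_SMP_Update_priority().
 */
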
719/**
[8f0c7a46]720 * @brief Enqueues a scheduled node according to the specified order
[c0bff5e]721 * function.
722 *
723 * @param[in] context The scheduler instance context.
[8f0c7a46]724 * @param[in] node The node to enqueue.
[c0bff5e]725 * @param[in] order The order function.
[5c3d250]726 * @param[in] extract_from_ready Function to extract a node from the set of
727 *   ready nodes.
[c0bff5e]728 * @param[in] get_highest_ready Function to get the highest ready node.
729 * @param[in] insert_ready Function to insert a node into the set of ready
[238629f]730 *   nodes.
[c0bff5e]731 * @param[in] insert_scheduled Function to insert a node into the set of
[238629f]732 *   scheduled nodes.
[c0bff5e]733 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
[238629f]734 *   of ready nodes to the set of scheduled nodes.
[8f0c7a46]735 * @param[in] allocate_processor Function to allocate a processor to a node
[238629f]736 *   based on the rules of the scheduler.
[c0bff5e]737 */
[8568341]738static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
[d9b54da]739  Scheduler_Context                *context,
[8f0c7a46]740  Scheduler_Node                   *node,
[238629f]741  Chain_Node_order                  order,
[5c3d250]742  Scheduler_SMP_Extract             extract_from_ready,
[238629f]743  Scheduler_SMP_Get_highest_ready   get_highest_ready,
744  Scheduler_SMP_Insert              insert_ready,
745  Scheduler_SMP_Insert              insert_scheduled,
746  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]747  Scheduler_SMP_Allocate_processor  allocate_processor
[c0bff5e]748)
[48c4a55]749{
[d057d653]750  while ( true ) {
751    Scheduler_Node                   *highest_ready;
752    Scheduler_Try_to_schedule_action  action;
[48c4a55]753
[d057d653]754    highest_ready = ( *get_highest_ready )( context, node );
[5c3d250]755
756    /*
757     * The node has been extracted from the scheduled chain.  We have to place
758     * it now in the scheduled or ready set.
759     */
760    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
761      ( *insert_scheduled )( context, node );
[d057d653]762      return NULL;
763    }
[5c3d250]764
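    /*
     * Try to schedule the highest ready node.  If this is not possible
     * (SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK), the highest ready node is
     * extracted from the ready set and the loop retries with the next one.
     */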
[d057d653]765    action = _Scheduler_Try_to_schedule_node(
766      context,
767      highest_ready,
768      _Scheduler_Node_get_idle( node ),
769      _Scheduler_SMP_Get_idle_thread
770    );
771
772    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
[a7a8ec03]773      Thread_Control *user;
[d057d653]774      Thread_Control *idle;
[5c3d250]775
[a7a8ec03]776      user = _Scheduler_SMP_Preempt(
[5c3d250]777        context,
778        highest_ready,
[d057d653]779        node,
780        allocate_processor
[5c3d250]781      );
[c0bff5e]782
[d057d653]783      ( *insert_ready )( context, node );
784      ( *move_from_ready_to_scheduled )( context, highest_ready );
785
786      idle = _Scheduler_Release_idle_thread(
787        context,
788        node,
789        _Scheduler_SMP_Release_idle_thread
790      );
791
792      if ( idle == NULL ) {
793        return user;
[be0366b]794      } else {
[d057d653]795        return NULL;
796      }
797    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
[3a72411]798      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
[d057d653]799      _Scheduler_SMP_Node_change_state(
[3a72411]800        highest_ready,
[d057d653]801        SCHEDULER_SMP_NODE_SCHEDULED
802      );
[19e41767]803
[d057d653]804      ( *insert_ready )( context, node );
805      ( *move_from_ready_to_scheduled )( context, highest_ready );
[8568341]806
[d057d653]807      _Scheduler_Exchange_idle_thread(
808        highest_ready,
809        node,
810        _Scheduler_Node_get_idle( node )
811      );
812      return NULL;
813    } else {
814      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
[be0366b]815
[d057d653]816      _Scheduler_SMP_Node_change_state(
[3a72411]817        highest_ready,
[d057d653]818        SCHEDULER_SMP_NODE_BLOCKED
819      );
[8568341]820
[d057d653]821      ( *extract_from_ready )( context, highest_ready );
822    }
823  }
[48c4a55]824}
825
[c0bff5e]826static inline void _Scheduler_SMP_Extract_from_scheduled(
[8f0c7a46]827  Scheduler_Node *node
[c0bff5e]828)
[f39f667a]829{
[8f0c7a46]830  _Chain_Extract_unprotected( &node->Node );
[f39f667a]831}
832
[48c4a55]833static inline void _Scheduler_SMP_Schedule_highest_ready(
[d9b54da]834  Scheduler_Context                *context,
[8f0c7a46]835  Scheduler_Node                   *victim,
[edb020c]836  Per_CPU_Control                  *victim_cpu,
[5c3d250]837  Scheduler_SMP_Extract             extract_from_ready,
[d9b54da]838  Scheduler_SMP_Get_highest_ready   get_highest_ready,
839  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]840  Scheduler_SMP_Allocate_processor  allocate_processor
[48c4a55]841)
842{
[d057d653]843  Scheduler_Try_to_schedule_action action;
844
[be0366b]845  do {
[5c3d250]846    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
847
[be0366b]848    action = _Scheduler_Try_to_schedule_node(
849      context,
850      highest_ready,
851      NULL,
852      _Scheduler_SMP_Get_idle_thread
853    );
854
855    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
[5c3d250]856      _Scheduler_SMP_Allocate_processor(
857        context,
858        highest_ready,
[a7a8ec03]859        _Scheduler_Node_get_user( victim ),
[edb020c]860        victim_cpu,
[5c3d250]861        allocate_processor
862      );
[48c4a55]863
[5c3d250]864      ( *move_from_ready_to_scheduled )( context, highest_ready );
865    } else {
[be0366b]866      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
867
[5c3d250]868      _Scheduler_SMP_Node_change_state(
[3a72411]869        highest_ready,
[5c3d250]870        SCHEDULER_SMP_NODE_BLOCKED
871      );
[19e41767]872
[5c3d250]873      ( *extract_from_ready )( context, highest_ready );
874    }
[d057d653]875  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
[48c4a55]876}
877
[c6522a65]878/**
[f39f667a]879 * @brief Blocks a thread.
[c6522a65]880 *
881 * @param[in] context The scheduler instance context.
882 * @param[in] thread The thread of the scheduling operation.
[e382a1b]883 * @param[in] node The scheduler node of the thread to block.
[f39f667a]884 * @param[in] extract_from_ready Function to extract a node from the set of
[5c3d250]885 *   ready nodes.
[c6522a65]886 * @param[in] get_highest_ready Function to get the highest ready node.
887 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
[5c3d250]888 *   of ready nodes to the set of scheduled nodes.
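 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.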
[c6522a65]889 */
[f39f667a]890static inline void _Scheduler_SMP_Block(
[d9b54da]891  Scheduler_Context                *context,
892  Thread_Control                   *thread,
[e382a1b]893  Scheduler_Node                   *node,
[d9b54da]894  Scheduler_SMP_Extract             extract_from_ready,
895  Scheduler_SMP_Get_highest_ready   get_highest_ready,
896  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]897  Scheduler_SMP_Allocate_processor  allocate_processor
[48c4a55]898)
899{
[edb020c]900  Scheduler_SMP_Node_state  node_state;
901  Per_CPU_Control          *thread_cpu;
[cceb19f4]902
[e382a1b]903  node_state = _Scheduler_SMP_Node_state( node );
904  _Assert( node_state != SCHEDULER_SMP_NODE_BLOCKED );
[cceb19f4]905
[edb020c]906  thread_cpu = _Scheduler_Block_node(
[5c3d250]907    context,
[cceb19f4]908    thread,
[e382a1b]909    node,
910    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
[27783f6]911    _Scheduler_SMP_Get_idle_thread
[5c3d250]912  );
[edb020c]913  if ( thread_cpu != NULL ) {
[3a72411]914    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
[beab7329]915
[e382a1b]916    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
917      _Scheduler_SMP_Extract_from_scheduled( node );
[5c3d250]918      _Scheduler_SMP_Schedule_highest_ready(
919        context,
[e382a1b]920        node,
[edb020c]921        thread_cpu,
[5c3d250]922        extract_from_ready,
923        get_highest_ready,
924        move_from_ready_to_scheduled,
[27783f6]925        allocate_processor
[5c3d250]926      );
927    } else {
[e382a1b]928      ( *extract_from_ready )( context, node );
[5c3d250]929    }
[48c4a55]930  }
931}
932
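/*
 * Illustrative sketch only: a concrete scheduler's block operation forwards
 * to _Scheduler_SMP_Block() with its own ready set callbacks.  The
 * _Scheduler_hypothetical_* helpers are assumptions for illustration:
 *
 *   _Scheduler_SMP_Block(
 *     context,
 *     thread,
 *     node,
 *     _Scheduler_hypothetical_Extract_from_ready,
 *     _Scheduler_hypothetical_Get_highest_ready,
 *     _Scheduler_hypothetical_Move_from_ready_to_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy
 *   );
 */
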
[8568341]933static inline Thread_Control *_Scheduler_SMP_Unblock(
[9bfad8c]934  Scheduler_Context     *context,
935  Thread_Control        *thread,
[72e0bdb]936  Scheduler_Node        *node,
[9bfad8c]937  Scheduler_SMP_Update   update,
938  Scheduler_SMP_Enqueue  enqueue_fifo
[c0bff5e]939)
940{
[72e0bdb]941  Scheduler_SMP_Node_state  node_state;
942  bool                      unblock;
943  Thread_Control           *needs_help;
[9bfad8c]944
[72e0bdb]945  node_state = _Scheduler_SMP_Node_state( node );
[9bfad8c]946  unblock = _Scheduler_Unblock_node(
[5c3d250]947    context,
948    thread,
[72e0bdb]949    node,
950    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
[27783f6]951    _Scheduler_SMP_Release_idle_thread
[5c3d250]952  );
[c0bff5e]953
[5c3d250]954  if ( unblock ) {
[9bfad8c]955    Priority_Control new_priority;
956    bool             prepend_it;
957
[72e0bdb]958    new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
[9bfad8c]959    (void) prepend_it;
960
[72e0bdb]961    if ( new_priority != _Scheduler_SMP_Node_priority( node ) ) {
962      ( *update )( context, node, new_priority );
[9bfad8c]963    }
964
[72e0bdb]965    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
[3a72411]966      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
[cceb19f4]967
[72e0bdb]968      needs_help = ( *enqueue_fifo )( context, node, thread );
[cceb19f4]969    } else {
[72e0bdb]970      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
[79569ae]971      _Assert(
[72e0bdb]972        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
973          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
[79569ae]974      );
[72e0bdb]975      _Assert( node->idle == NULL );
[c0bff5e]976
[72e0bdb]977      if ( node->accepts_help == thread ) {
[cceb19f4]978        needs_help = thread;
979      } else {
980        needs_help = NULL;
981      }
982    }
[5c3d250]983  } else {
984    needs_help = NULL;
985  }
986
987  return needs_help;
[c0bff5e]988}
989
[9bfad8c]990static inline Thread_Control *_Scheduler_SMP_Update_priority(
[8568341]991  Scheduler_Context               *context,
992  Thread_Control                  *thread,
[501043a]993  Scheduler_Node                  *node,
[8568341]994  Scheduler_SMP_Extract            extract_from_ready,
995  Scheduler_SMP_Update             update,
996  Scheduler_SMP_Enqueue            enqueue_fifo,
997  Scheduler_SMP_Enqueue            enqueue_lifo,
998  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo,
999  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo
[48c4a55]1000)
1001{
[501043a]1002  Thread_Control          *needs_help;
1003  Priority_Control         new_priority;
1004  bool                     prepend_it;
1005  Scheduler_SMP_Node_state node_state;
[9bfad8c]1006
[501043a]1007  new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
[9bfad8c]1008
[501043a]1009  if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
[9bfad8c]1010    /* Nothing to do */
1011    return NULL;
1012  }
[a336d51]1013
[501043a]1014  node_state = _Scheduler_SMP_Node_state( node );
1015
1016  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1017    _Scheduler_SMP_Extract_from_scheduled( node );
[c0bff5e]1018
[501043a]1019    ( *update )( context, node, new_priority );
[c0bff5e]1020
1021    if ( prepend_it ) {
[501043a]1022      needs_help = ( *enqueue_scheduled_lifo )( context, node );
[c0bff5e]1023    } else {
[501043a]1024      needs_help = ( *enqueue_scheduled_fifo )( context, node );
[c0bff5e]1025    }
[501043a]1026  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1027    ( *extract_from_ready )( context, node );
[48c4a55]1028
[501043a]1029    ( *update )( context, node, new_priority );
[f39f667a]1030
[c0bff5e]1031    if ( prepend_it ) {
[501043a]1032      needs_help = ( *enqueue_lifo )( context, node, NULL );
[c0bff5e]1033    } else {
[501043a]1034      needs_help = ( *enqueue_fifo )( context, node, NULL );
[c0bff5e]1035    }
[5c3d250]1036  } else {
[501043a]1037    ( *update )( context, node, new_priority );
[5c3d250]1038
1039    needs_help = NULL;
[f39f667a]1040  }
[8568341]1041
1042  return needs_help;
[48c4a55]1043}
1044
[d097b546]1045static inline Thread_Control *_Scheduler_SMP_Ask_for_help_X(
[5c3d250]1046  Scheduler_Context                  *context,
1047  Thread_Control                     *offers_help,
1048  Thread_Control                     *needs_help,
[27783f6]1049  Scheduler_SMP_Enqueue               enqueue_fifo
[5c3d250]1050)
1051{
1052  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
1053  Thread_Control *next_needs_help = NULL;
1054  Thread_Control *previous_accepts_help;
1055
1056  previous_accepts_help = node->Base.accepts_help;
1057  node->Base.accepts_help = needs_help;
1058
1059  switch ( node->state ) {
1060    case SCHEDULER_SMP_NODE_READY:
1061      next_needs_help =
1062        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
1063      break;
1064    case SCHEDULER_SMP_NODE_SCHEDULED:
1065      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
1066        context,
1067        &node->Base,
1068        offers_help,
1069        needs_help,
1070        previous_accepts_help,
[27783f6]1071        _Scheduler_SMP_Release_idle_thread
[5c3d250]1072      );
1073      break;
1074    case SCHEDULER_SMP_NODE_BLOCKED:
1075      if (
1076        _Scheduler_Ask_blocked_node_for_help(
1077          context,
1078          &node->Base,
1079          offers_help,
1080          needs_help
1081        )
1082      ) {
[3a72411]1083        _Scheduler_SMP_Node_change_state(
1084          &node->Base,
1085          SCHEDULER_SMP_NODE_READY
1086        );
[5c3d250]1087
1088        next_needs_help = ( *enqueue_fifo )(
1089          context,
1090          &node->Base,
1091          needs_help
1092        );
1093      }
1094      break;
1095  }
1096
1097  return next_needs_help;
1098}
1099
[8568341]1100static inline Thread_Control *_Scheduler_SMP_Yield(
1101  Scheduler_Context               *context,
1102  Thread_Control                  *thread,
[2df4abc]1103  Scheduler_Node                  *node,
[8568341]1104  Scheduler_SMP_Extract            extract_from_ready,
1105  Scheduler_SMP_Enqueue            enqueue_fifo,
1106  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo
[701dd96f]1107)
1108{
[8568341]1109  Thread_Control *needs_help;
[701dd96f]1110
[2df4abc]1111  if ( _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_SCHEDULED ) {
1112    _Scheduler_SMP_Extract_from_scheduled( node );
[701dd96f]1113
[2df4abc]1114    needs_help = ( *enqueue_scheduled_fifo )( context, node );
[701dd96f]1115  } else {
[2df4abc]1116    ( *extract_from_ready )( context, node );
[701dd96f]1117
[2df4abc]1118    needs_help = ( *enqueue_fifo )( context, node, NULL );
[701dd96f]1119  }
[8568341]1120
1121  return needs_help;
[701dd96f]1122}
1123
[48c4a55]1124static inline void _Scheduler_SMP_Insert_scheduled_lifo(
[3730a07f]1125  Scheduler_Context *context,
[8f0c7a46]1126  Scheduler_Node    *node_to_insert
[48c4a55]1127)
1128{
[3730a07f]1129  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1130
[48c4a55]1131  _Chain_Insert_ordered_unprotected(
[494c2e3]1132    &self->Scheduled,
[8f0c7a46]1133    &node_to_insert->Node,
1134    _Scheduler_SMP_Insert_priority_lifo_order
[48c4a55]1135  );
1136}
1137
1138static inline void _Scheduler_SMP_Insert_scheduled_fifo(
[3730a07f]1139  Scheduler_Context *context,
[8f0c7a46]1140  Scheduler_Node    *node_to_insert
[48c4a55]1141)
1142{
[3730a07f]1143  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1144
[48c4a55]1145  _Chain_Insert_ordered_unprotected(
[494c2e3]1146    &self->Scheduled,
[8f0c7a46]1147    &node_to_insert->Node,
1148    _Scheduler_SMP_Insert_priority_fifo_order
[48c4a55]1149  );
1150}
1151
[9d83f58a]1152/** @} */
1153
1154#ifdef __cplusplus
1155}
1156#endif /* __cplusplus */
1157
1158#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */