source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ e9ee2f0

4.115
Last change on this file since e9ee2f0 was c6522a65, checked in by Sebastian Huber <sebastian.huber@…>, on 05/12/14 at 06:38:38

score: SMP scheduler documentation

  • Property mode set to 100644
File size: 15.8 KB
Line 
1/**
2 * @file
3 *
4 * @brief SMP Scheduler Implementation
5 *
6 * @ingroup ScoreSchedulerSMP
7 */
8
9/*
10 * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
11 *
12 *  embedded brains GmbH
13 *  Dornierstr. 4
14 *  82178 Puchheim
15 *  Germany
16 *  <rtems@embedded-brains.de>
17 *
18 * The license and distribution terms for this file may be
19 * found in the file LICENSE in this distribution or at
20 * http://www.rtems.org/license/LICENSE.
21 */
22
23#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25
26#include <rtems/score/schedulersmp.h>
27#include <rtems/score/assert.h>
28#include <rtems/score/chainimpl.h>
29#include <rtems/score/schedulersimpleimpl.h>
30
31#ifdef __cplusplus
32extern "C" {
33#endif /* __cplusplus */
34
35/**
36 * @addtogroup ScoreSchedulerSMP
37 *
38 * The scheduler nodes can be in four states
39 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
40 * - @ref SCHEDULER_SMP_NODE_SCHEDULED,
41 * - @ref SCHEDULER_SMP_NODE_READY, and
42 * - @ref SCHEDULER_SMP_NODE_IN_THE_AIR.
43 *
 * State transitions are triggered via three basic operations
45 * - _Scheduler_SMP_Enqueue_ordered(),
46 * - _Scheduler_SMP_Extract(), and
47 * - _Scheduler_SMP_Schedule().
48 *
49 * @dot
50 * digraph {
51 *   node [style="filled"];
52 *
53 *   bs [label="BLOCKED"];
54 *   ss [label="SCHEDULED", fillcolor="green"];
55 *   rs [label="READY", fillcolor="red"];
56 *   as [label="IN THE AIR", fillcolor="orange"];
57 *
58 *   edge [label="enqueue"];
59 *   edge [fontcolor="darkgreen", color="darkgreen"];
60 *
61 *   bs -> ss;
62 *   as -> ss;
63 *
64 *   edge [label="enqueue"];
65 *   edge [fontcolor="red", color="red"];
66 *
67 *   bs -> rs;
68 *   as -> rs;
69 *
70 *   edge [label="enqueue other"];
71 *
72 *   ss -> rs;
73 *
74 *   edge [label="schedule"];
75 *   edge [fontcolor="black", color="black"];
76 *
77 *   as -> bs;
78 *
79 *   edge [label="extract"];
80 *   edge [fontcolor="brown", color="brown"];
81 *
82 *   ss -> as;
83 *
84 *   edge [fontcolor="black", color="black"];
85 *
86 *   rs -> bs;
87 *
88 *   edge [label="enqueue other\nschedule other"];
89 *   edge [fontcolor="darkgreen", color="darkgreen"];
90 *
91 *   rs -> ss;
92 * }
93 * @enddot
94 *
95 * During system initialization each processor of the scheduler instance starts
96 * with an idle thread assigned to it.  Lets have a look at an example with two
97 * idle threads I and J with priority 5.  We also have blocked threads A, B and
98 * C with priorities 1, 2 and 3 respectively.
99 *
100 * @dot
101 * digraph {
102 *   node [style="filled"];
103 *   edge [dir="none"];
104 *   subgraph {
105 *     rank = same;
106 *
107 *     i [label="I (5)", fillcolor="green"];
108 *     j [label="J (5)", fillcolor="green"];
109 *     a [label="A (1)"];
110 *     b [label="B (2)"];
111 *     c [label="C (3)"];
112 *     i -> j;
113 *   }
114 *
115 *   subgraph {
116 *     rank = same;
117 *
118 *     p0 [label="PROCESSOR 0", shape="box"];
119 *     p1 [label="PROCESSOR 1", shape="box"];
120 *   }
121 *
122 *   i -> p0;
123 *   j -> p1;
124 * }
125 * @enddot
126 *
 * Let's start A.  For this an enqueue operation is performed.
128 *
129 * @dot
130 * digraph {
131 *   node [style="filled"];
132 *   edge [dir="none"];
133 *
134 *   subgraph {
135 *     rank = same;
136 *
137 *     i [label="I (5)", fillcolor="green"];
138 *     j [label="J (5)", fillcolor="red"];
139 *     a [label="A (1)", fillcolor="green"];
140 *     b [label="B (2)"];
141 *     c [label="C (3)"];
142 *     a -> i;
143 *   }
144 *
145 *   subgraph {
146 *     rank = same;
147 *
148 *     p0 [label="PROCESSOR 0", shape="box"];
149 *     p1 [label="PROCESSOR 1", shape="box"];
150 *   }
151 *
152 *   i -> p0;
153 *   a -> p1;
154 * }
155 * @enddot
156 *
 * Let's start C.
158 *
159 * @dot
160 * digraph {
161 *   node [style="filled"];
162 *   edge [dir="none"];
163 *
164 *   subgraph {
165 *     rank = same;
166 *
167 *     a [label="A (1)", fillcolor="green"];
168 *     c [label="C (3)", fillcolor="green"];
169 *     i [label="I (5)", fillcolor="red"];
170 *     j [label="J (5)", fillcolor="red"];
171 *     b [label="B (2)"];
172 *     a -> c;
173 *     i -> j;
174 *   }
175 *
176 *   subgraph {
177 *     rank = same;
178 *
179 *     p0 [label="PROCESSOR 0", shape="box"];
180 *     p1 [label="PROCESSOR 1", shape="box"];
181 *   }
182 *
183 *   c -> p0;
184 *   a -> p1;
185 * }
186 * @enddot
187 *
 * Let's start B.
189 *
190 * @dot
191 * digraph {
192 *   node [style="filled"];
193 *   edge [dir="none"];
194 *
195 *   subgraph {
196 *     rank = same;
197 *
198 *     a [label="A (1)", fillcolor="green"];
199 *     b [label="B (2)", fillcolor="green"];
200 *     c [label="C (3)", fillcolor="red"];
201 *     i [label="I (5)", fillcolor="red"];
202 *     j [label="J (5)", fillcolor="red"];
203 *     a -> b;
204 *     c -> i -> j;
205 *   }
206 *
207 *   subgraph {
208 *     rank = same;
209 *
210 *     p0 [label="PROCESSOR 0", shape="box"];
211 *     p1 [label="PROCESSOR 1", shape="box"];
212 *   }
213 *
214 *   b -> p0;
215 *   a -> p1;
216 * }
217 * @enddot
218 *
 * Let's do something with A.  This can be a blocking operation or a priority
220 * change.  For this an extract operation is performed first.
221 *
222 * @dot
223 * digraph {
224 *   node [style="filled"];
225 *   edge [dir="none"];
226 *
227 *   subgraph {
228 *     rank = same;
229 *
230 *     b [label="B (2)", fillcolor="green"];
231 *     a [label="A (1)", fillcolor="orange"];
232 *     c [label="C (3)", fillcolor="red"];
233 *     i [label="I (5)", fillcolor="red"];
234 *     j [label="J (5)", fillcolor="red"];
235 *     c -> i -> j;
236 *   }
237 *
238 *   subgraph {
239 *     rank = same;
240 *
241 *     p0 [label="PROCESSOR 0", shape="box"];
242 *     p1 [label="PROCESSOR 1", shape="box"];
243 *   }
244 *
245 *   b -> p0;
246 *   a -> p1;
247 * }
248 * @enddot
249 *
 * Let's change the priority of thread A to 4 and enqueue it.
251 *
252 * @dot
253 * digraph {
254 *   node [style="filled"];
255 *   edge [dir="none"];
256 *
257 *   subgraph {
258 *     rank = same;
259 *
260 *     b [label="B (2)", fillcolor="green"];
261 *     c [label="C (3)", fillcolor="green"];
262 *     a [label="A (4)", fillcolor="red"];
263 *     i [label="I (5)", fillcolor="red"];
264 *     j [label="J (5)", fillcolor="red"];
265 *     b -> c;
266 *     a -> i -> j;
267 *   }
268 *
269 *   subgraph {
270 *     rank = same;
271 *
272 *     p0 [label="PROCESSOR 0", shape="box"];
273 *     p1 [label="PROCESSOR 1", shape="box"];
274 *   }
275 *
276 *   b -> p0;
277 *   c -> p1;
278 * }
279 * @enddot
280 *
281 * Alternatively we can also do a blocking operation with thread A.  In this
282 * case schedule will be called.
283 *
284 * @dot
285 * digraph {
286 *   node [style="filled"];
287 *   edge [dir="none"];
288 *
289 *   subgraph {
290 *     rank = same;
291 *
292 *     b [label="B (2)", fillcolor="green"];
293 *     c [label="C (3)", fillcolor="green"];
294 *     i [label="I (5)", fillcolor="red"];
295 *     j [label="J (5)", fillcolor="red"];
296 *     a [label="A (1)"];
297 *     b -> c;
298 *     i -> j;
299 *   }
300 *
301 *   subgraph {
302 *     rank = same;
303 *
304 *     p0 [label="PROCESSOR 0", shape="box"];
305 *     p1 [label="PROCESSOR 1", shape="box"];
306 *   }
307 *
308 *   b -> p0;
309 *   c -> p1;
310 * }
311 * @enddot
312 *
313 * @{
314 */
315
/**
 * @brief Returns the highest ready thread of the scheduler instance.
 */
typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_SMP_Context *self
);

/**
 * @brief Extracts a thread from the set of scheduled or ready threads.
 */
typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_SMP_Context *self,
  Thread_Control *thread
);

/**
 * @brief Inserts a thread into the set of scheduled or ready threads.
 */
typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_SMP_Context *self,
  Thread_Control *thread_to_insert
);

/**
 * @brief Moves a thread from one thread set to another (e.g. from the set of
 * ready threads to the set of scheduled threads).
 */
typedef void ( *Scheduler_SMP_Move )(
  Scheduler_SMP_Context *self,
  Thread_Control *thread_to_move
);
334
335static inline void _Scheduler_SMP_Initialize(
336  Scheduler_SMP_Context *self
337)
338{
339  _Chain_Initialize_empty( &self->Scheduled );
340}
341
/**
 * @brief Returns the SMP scheduler node of the thread.
 *
 * Downcasts the generic scheduler node obtained via _Scheduler_Node_get() to
 * the SMP-specific node type.
 *
 * @param[in] thread The thread of interest.
 */
static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_get(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Node_get( thread );
}
348
/**
 * @brief Initializes an SMP scheduler node.
 *
 * The node starts in the blocked state, see the state diagram in the group
 * documentation.
 *
 * @param[in] node The node to initialize.
 */
static inline void _Scheduler_SMP_Node_initialize(
  Scheduler_SMP_Node *node
)
{
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
}
355
/*
 * Table of permitted node state transitions, indexed as
 * [current state][new state].  Defined in the scheduler implementation.
 */
extern const bool _Scheduler_SMP_Node_valid_state_changes[ 4 ][ 4 ];

/**
 * @brief Changes the state of a scheduler node.
 *
 * In debug configurations an assertion verifies that the transition is
 * permitted according to _Scheduler_SMP_Node_valid_state_changes.
 *
 * @param[in] node The node to update.
 * @param[in] new_state The new node state.
 */
static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_SMP_Node *node,
  Scheduler_SMP_Node_state new_state
)
{
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
  );

  node->state = new_state;
}
369
370static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
371  const Scheduler_SMP_Context *self,
372  const Per_CPU_Control *cpu
373)
374{
375  return cpu->scheduler_context == &self->Base;
376}
377
/**
 * @brief Updates the heir thread of a processor and requests a thread
 * dispatch if necessary.
 *
 * An inter-processor interrupt is sent in case the processor of the heir is
 * not the processor executing this operation.
 *
 * @param[in] cpu_self The processor executing this operation.
 * @param[in] cpu_for_heir The processor that gets the new heir thread.
 * @param[in] heir The new heir thread.
 */
static inline void _Scheduler_SMP_Update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and then the dispatch
   * necessary so that _Thread_Get_heir_and_make_it_executing() cannot miss an
   * update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    /* A remote processor must be notified via an inter-processor interrupt */
    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}
405
/**
 * @brief Allocates the processor of the victim thread to the scheduled
 * thread.
 *
 * The node of the scheduled thread changes to the SCHEDULED state.  In case
 * the scheduled thread currently executes on a processor owned by another
 * scheduler instance, a migration to a processor of our set is forced.
 *
 * @param[in] self The scheduler instance context.
 * @param[in] scheduled The thread to allocate a processor to.
 * @param[in] victim The thread that loses its processor.
 */
static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_SMP_Context *self,
  Thread_Control *scheduled,
  Thread_Control *victim
)
{
  Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
  Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
  Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Scheduler_SMP_Node_change_state(
    scheduled_node,
    SCHEDULER_SMP_NODE_SCHEDULED
  );

  /* Interrupts must be disabled here (non-zero ISR level) */
  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
      /* Keep the scheduled thread on its current processor and move the
       * previous heir of that processor to the victim's processor below. */
      heir = cpu_of_scheduled->heir;
      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
    } else {
      /* We have to force a migration to our processor set */
      _Assert( scheduled->debug_real_cpu->heir != scheduled );
      heir = scheduled;
    }
  } else {
    heir = scheduled;
  }

  if ( heir != victim ) {
    _Thread_Set_CPU( heir, cpu_of_victim );
    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
  }
}
443
444static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
445  Scheduler_SMP_Context *self
446)
447{
448  Thread_Control *lowest_ready = NULL;
449  Chain_Control *scheduled = &self->Scheduled;
450
451  if ( !_Chain_Is_empty( scheduled ) ) {
452    lowest_ready = (Thread_Control *) _Chain_Last( scheduled );
453  }
454
455  return lowest_ready;
456}
457
/**
 * @brief Enqueues a thread according to the specified order function.
 *
 * @param[in] self The scheduler instance context.
 * @param[in] thread The thread to enqueue.
 * @param[in] order The order function.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 * nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 * scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 * of ready nodes to the set of scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 * of scheduled nodes to the set of ready nodes.
 */
static inline void _Scheduler_SMP_Enqueue_ordered(
  Scheduler_SMP_Context *self,
  Thread_Control *thread,
  Chain_Node_order order,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Move move_from_scheduled_to_ready
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  if ( node->state == SCHEDULER_SMP_NODE_IN_THE_AIR ) {
    Thread_Control *highest_ready = ( *get_highest_ready )( self );

    /*
     * The thread has been extracted from the scheduled chain.  We have to
     * place it now on the scheduled or ready chain.
     *
     * NOTE: Do not exchange parameters to do the negation of the order check.
     */
    if (
      highest_ready != NULL
        && !( *order )( &thread->Object.Node, &highest_ready->Object.Node )
    ) {
      /* A ready thread is preferred: yield this thread's processor to it */
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Allocate_processor( self, highest_ready, thread );
      ( *insert_ready )( self, thread );
      ( *move_from_ready_to_scheduled )( self, highest_ready );
    } else {
      /* This thread remains the best choice: it stays scheduled */
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
      ( *insert_scheduled )( self, thread );
    }
  } else {
    Thread_Control *lowest_scheduled = _Scheduler_SMP_Get_lowest_scheduled( self );

    /*
     * The scheduled chain is empty if nested interrupts change the priority of
     * all scheduled threads.  These threads are in the air.
     */
    if (
      lowest_scheduled != NULL
        && ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node )
    ) {
      Scheduler_SMP_Node *lowest_scheduled_node =
        _Scheduler_SMP_Node_get( lowest_scheduled );

      /* Preempt the lowest scheduled thread in favour of this thread */
      _Scheduler_SMP_Node_change_state(
        lowest_scheduled_node,
        SCHEDULER_SMP_NODE_READY
      );
      _Scheduler_SMP_Allocate_processor( self, thread, lowest_scheduled );
      ( *insert_scheduled )( self, thread );
      ( *move_from_scheduled_to_ready )( self, lowest_scheduled );
    } else {
      /* No preemption takes place: the thread becomes ready */
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      ( *insert_ready )( self, thread );
    }
  }
}
535
/**
 * @brief Allocates the processor of the victim thread to the highest ready
 * thread and moves that thread to the set of scheduled threads.
 *
 * @param[in] self The scheduler instance context.
 * @param[in] victim The thread that loses its processor.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 * of ready nodes to the set of scheduled nodes.
 */
static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_SMP_Context *self,
  Thread_Control *victim,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Thread_Control *highest_ready = ( *get_highest_ready )( self );

  _Scheduler_SMP_Allocate_processor( self, highest_ready, victim );

  ( *move_from_ready_to_scheduled )( self, highest_ready );
}
549
/**
 * @brief Finalize a scheduling operation.
 *
 * In case the thread's node is in the air, the node becomes blocked and the
 * highest ready thread takes over the thread's processor.  Otherwise nothing
 * happens.
 *
 * @param[in] self The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 * of ready nodes to the set of scheduled nodes.
 */
static inline void _Scheduler_SMP_Schedule(
  Scheduler_SMP_Context *self,
  Thread_Control *thread,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  if ( node->state == SCHEDULER_SMP_NODE_IN_THE_AIR ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    _Scheduler_SMP_Schedule_highest_ready(
      self,
      thread,
      get_highest_ready,
      move_from_ready_to_scheduled
    );
  }
}
579
/**
 * @brief Blocks a thread.
 *
 * The thread is first extracted and then the scheduling operation is
 * finalized via _Scheduler_SMP_Schedule().
 *
 * @param[in] self The scheduler instance context.
 * @param[in] thread The thread to block.
 * @param[in] extract Function to extract a node from the set of scheduled or
 * ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 * of ready nodes to the set of scheduled nodes.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_SMP_Context *self,
  Thread_Control *thread,
  Scheduler_SMP_Extract extract,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  ( *extract )( self, thread );

  _Scheduler_SMP_Schedule(
    self,
    thread,
    get_highest_ready,
    move_from_ready_to_scheduled
  );
}
597
598/**
599 * @brief Extracts a thread from the set of scheduled or ready nodes.
600 *
601 * @param[in] context The scheduler instance context.
602 * @param[in] thread The thread to extract.
603 * @param[in] extract Function to extract a node from the set of scheduled or
604 * ready nodes.
605 */
606static inline void _Scheduler_SMP_Extract(
607  Scheduler_SMP_Context *self,
608  Thread_Control *thread,
609  Scheduler_SMP_Extract extract
610)
611{
612  ( *extract )( self, thread );
613}
614
615static inline void _Scheduler_SMP_Insert_scheduled_lifo(
616  Scheduler_SMP_Context *self,
617  Thread_Control *thread
618)
619{
620  _Chain_Insert_ordered_unprotected(
621    &self->Scheduled,
622    &thread->Object.Node,
623    _Scheduler_simple_Insert_priority_lifo_order
624  );
625}
626
627static inline void _Scheduler_SMP_Insert_scheduled_fifo(
628  Scheduler_SMP_Context *self,
629  Thread_Control *thread
630)
631{
632  _Chain_Insert_ordered_unprotected(
633    &self->Scheduled,
634    &thread->Object.Node,
635    _Scheduler_simple_Insert_priority_fifo_order
636  );
637}
638
/**
 * @brief Starts an idle thread on a processor.
 *
 * The node state is set directly to scheduled (bypassing the state transition
 * check, see the system initialization description in the group
 * documentation), the thread is bound to the processor and appended to the
 * chain of scheduled threads.
 *
 * @param[in] self The scheduler instance context.
 * @param[in] thread The idle thread.
 * @param[in] cpu The processor for the idle thread.
 */
static inline void _Scheduler_SMP_Start_idle(
  Scheduler_SMP_Context *self,
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  node->state = SCHEDULER_SMP_NODE_SCHEDULED;

  _Thread_Set_CPU( thread, cpu );
  _Chain_Append_unprotected( &self->Scheduled, &thread->Object.Node );
}
652
653/** @} */
654
655#ifdef __cplusplus
656}
657#endif /* __cplusplus */
658
659#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */
Note: See TracBrowser for help on using the repository browser.