source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ b532bb2c

Last change on this file was b532bb2c, checked in by Sebastian Huber <sebastian.huber@…> on 05/16/14 at 08:54:48

score: Fix state diagram

/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue_ordered(),
 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance starts
 * with an idle thread assigned to it.  Let's have a look at an example with
 * two idle threads I and J with priority 5.  We also have blocked threads A, B
 * and C with priorities 1, 2 and 3 respectively.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
 *
115 *
116 * @dot
117 * digraph {
118 *   node [style="filled"];
119 *   edge [dir="none"];
120 *
121 *   subgraph {
122 *     rank = same;
123 *
124 *     i [label="I (5)", fillcolor="green"];
125 *     j [label="J (5)", fillcolor="red"];
126 *     a [label="A (1)", fillcolor="green"];
127 *     b [label="B (2)"];
128 *     c [label="C (3)"];
129 *     a -> i;
130 *   }
131 *
132 *   subgraph {
133 *     rank = same;
134 *
135 *     p0 [label="PROCESSOR 0", shape="box"];
136 *     p1 [label="PROCESSOR 1", shape="box"];
137 *   }
138 *
139 *   i -> p0;
140 *   a -> p1;
141 * }
142 * @enddot
143 *
 * Let's start C.
 *
145 *
146 * @dot
147 * digraph {
148 *   node [style="filled"];
149 *   edge [dir="none"];
150 *
151 *   subgraph {
152 *     rank = same;
153 *
154 *     a [label="A (1)", fillcolor="green"];
155 *     c [label="C (3)", fillcolor="green"];
156 *     i [label="I (5)", fillcolor="red"];
157 *     j [label="J (5)", fillcolor="red"];
158 *     b [label="B (2)"];
159 *     a -> c;
160 *     i -> j;
161 *   }
162 *
163 *   subgraph {
164 *     rank = same;
165 *
166 *     p0 [label="PROCESSOR 0", shape="box"];
167 *     p1 [label="PROCESSOR 1", shape="box"];
168 *   }
169 *
170 *   c -> p0;
171 *   a -> p1;
172 * }
173 * @enddot
174 *
 * Let's start B.
 *
176 *
177 * @dot
178 * digraph {
179 *   node [style="filled"];
180 *   edge [dir="none"];
181 *
182 *   subgraph {
183 *     rank = same;
184 *
185 *     a [label="A (1)", fillcolor="green"];
186 *     b [label="B (2)", fillcolor="green"];
187 *     c [label="C (3)", fillcolor="red"];
188 *     i [label="I (5)", fillcolor="red"];
189 *     j [label="J (5)", fillcolor="red"];
190 *     a -> b;
191 *     c -> i -> j;
192 *   }
193 *
194 *   subgraph {
195 *     rank = same;
196 *
197 *     p0 [label="PROCESSOR 0", shape="box"];
198 *     p1 [label="PROCESSOR 1", shape="box"];
199 *   }
200 *
201 *   b -> p0;
202 *   a -> p1;
203 * }
204 * @enddot
205 *
 * Let's change the priority of thread A to 4.
 *
207 *
208 * @dot
209 * digraph {
210 *   node [style="filled"];
211 *   edge [dir="none"];
212 *
213 *   subgraph {
214 *     rank = same;
215 *
216 *     b [label="B (2)", fillcolor="green"];
217 *     c [label="C (3)", fillcolor="green"];
218 *     a [label="A (4)", fillcolor="red"];
219 *     i [label="I (5)", fillcolor="red"];
220 *     j [label="J (5)", fillcolor="red"];
221 *     b -> c;
222 *     a -> i -> j;
223 *   }
224 *
225 *   subgraph {
226 *     rank = same;
227 *
228 *     p0 [label="PROCESSOR 0", shape="box"];
229 *     p1 [label="PROCESSOR 1", shape="box"];
230 *   }
231 *
232 *   b -> p0;
233 *   c -> p1;
234 * }
235 * @enddot
236 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * now migrated from processor 1 to processor 0, while thread C still executes
 * on processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */

typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Thread_Control *thread
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Thread_Control *thread_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Thread_Control *thread_to_move
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Priority_Control new_priority
);

typedef void ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Thread_Control *thread_to_enqueue
);

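/*
 * As an illustration of these operation types, a concrete SMP scheduler that
 * keeps its ready threads on a priority ordered chain could provide a
 * Scheduler_SMP_Get_highest_ready operation like the sketch below.  The
 * _My_scheduler_Context type and its Ready member are hypothetical and not
 * part of this header.
 *
 * @code
 * typedef struct {
 *   Scheduler_SMP_Context Base;
 *   Chain_Control         Ready;  // priority ordered, highest priority first
 * } _My_scheduler_Context;
 *
 * static Thread_Control *_My_scheduler_Get_highest_ready(
 *   Scheduler_Context *context
 * )
 * {
 *   _My_scheduler_Context *self = (_My_scheduler_Context *) context;
 *
 *   // The first node of the ordered ready chain is the highest ready thread
 *   return (Thread_Control *) _Chain_First( &self->Ready );
 * }
 * @endcode
 */
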
static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_get(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Node_get( thread );
}

static inline void _Scheduler_SMP_Node_initialize(
  Scheduler_SMP_Node *node
)
{
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
}

extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

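/*
 * The matrix itself is defined by the scheduler implementation.  A sketch of
 * contents consistent with the state diagram above, indexed in the order
 * BLOCKED, SCHEDULED, READY (illustration only, not the authoritative
 * definition):
 *
 * @code
 * const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ] = {
 *   // BLOCKED SCHEDULED READY    (new state)
 *   {  false,  true,     true  }, // from BLOCKED: enqueue
 *   {  true,   false,    true  }, // from SCHEDULED: block, enqueue other
 *   {  true,   true,     false }  // from READY: block, block other
 * };
 * @endcode
 */
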
static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_SMP_Node *node,
  Scheduler_SMP_Node_state new_state
)
{
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
  );

  node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_SMP_Context *self,
  const Per_CPU_Control *cpu
)
{
  return cpu->scheduler_context == &self->Base;
}

static inline void _Scheduler_SMP_Update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and then set the dispatch
   * necessary indicator so that _Thread_Get_heir_and_make_it_executing()
   * cannot miss an update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}

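/*
 * For illustration, the consumer side roughly performs the mirror image of
 * the above (a paraphrased sketch of _Thread_Get_heir_and_make_it_executing(),
 * not its authoritative definition; see the thread implementation header):
 *
 * @code
 * Thread_Control *_Thread_Get_heir_and_make_it_executing(
 *   Per_CPU_Control *cpu_self
 * )
 * {
 *   Thread_Control *heir;
 *
 *   cpu_self->dispatch_necessary = false;  // consume the indicator first ...
 *   _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
 *   heir = cpu_self->heir;                 // ... then read the heir
 *   cpu_self->executing = heir;
 *
 *   return heir;
 * }
 * @endcode
 *
 * The paired sequentially consistent fences ensure that a consumer which
 * observes the indicator also observes the corresponding heir update.
 */
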
static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_SMP_Context *self,
  Thread_Control *scheduled,
  Thread_Control *victim
)
{
  Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
  Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
  Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Scheduler_SMP_Node_change_state(
    scheduled_node,
    SCHEDULER_SMP_NODE_SCHEDULED
  );

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
      heir = cpu_of_scheduled->heir;
      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
    } else {
      /* We have to force a migration to our processor set */
      _Assert( scheduled->debug_real_cpu->heir != scheduled );
      heir = scheduled;
    }
  } else {
    heir = scheduled;
  }

  if ( heir != victim ) {
    _Thread_Set_CPU( heir, cpu_of_victim );
    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
  }
}

static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_SMP_Context *self
)
{
  Thread_Control *lowest_scheduled = NULL;
  Chain_Control *scheduled = &self->Scheduled;

  if ( !_Chain_Is_empty( scheduled ) ) {
    lowest_scheduled = (Thread_Control *) _Chain_Last( scheduled );
  }

  return lowest_scheduled;
}

/**
 * @brief Enqueues a thread according to the specified order function.
 *
 * The thread must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread to enqueue.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 * nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 * scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 * of scheduled nodes to the set of ready nodes.
 */
static inline void _Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context *context,
  Thread_Control *thread,
  Chain_Node_order order,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_scheduled_to_ready
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *lowest_scheduled =
    _Scheduler_SMP_Get_lowest_scheduled( self );

  _Assert( lowest_scheduled != NULL );

  /*
   * NOTE: Do not exchange parameters to do the negation of the order check.
   */
  if ( ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
    Scheduler_SMP_Node *lowest_scheduled_node =
      _Scheduler_SMP_Node_get( lowest_scheduled );

    _Scheduler_SMP_Node_change_state(
      lowest_scheduled_node,
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Allocate_processor( self, thread, lowest_scheduled );
    ( *insert_scheduled )( &self->Base, thread );
    ( *move_from_scheduled_to_ready )( &self->Base, lowest_scheduled );
  } else {
    ( *insert_ready )( &self->Base, thread );
  }
}

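/*
 * A usage sketch (the _My_scheduler_* helpers are hypothetical): a concrete
 * scheduler typically fixes the order function and the set operations and
 * exposes a plain enqueue operation.
 *
 * @code
 * static void _My_scheduler_Enqueue_fifo(
 *   Scheduler_Context *context,
 *   Thread_Control *thread
 * )
 * {
 *   _Scheduler_SMP_Enqueue_ordered(
 *     context,
 *     thread,
 *     _Scheduler_simple_Insert_priority_fifo_order,
 *     _My_scheduler_Insert_ready_fifo,
 *     _Scheduler_SMP_Insert_scheduled_fifo,
 *     _My_scheduler_Move_from_scheduled_to_ready
 *   );
 * }
 * @endcode
 */
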
/**
 * @brief Enqueues a scheduled thread according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread to enqueue.
 * @param[in] order The order function.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 * nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 * scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 * of ready nodes to the set of scheduled nodes.
 */
static inline void _Scheduler_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context *context,
  Thread_Control *thread,
  Chain_Node_order order,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
  Thread_Control *highest_ready = ( *get_highest_ready )( &self->Base );

  _Assert( highest_ready != NULL );

  /*
   * The thread has been extracted from the scheduled chain.  We have to place
   * it now on the scheduled or ready set.
   *
   * NOTE: Do not exchange parameters to do the negation of the order check.
   */
  if ( !( *order )( &thread->Object.Node, &highest_ready->Object.Node ) ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    _Scheduler_SMP_Allocate_processor( self, highest_ready, thread );
    ( *insert_ready )( &self->Base, thread );
    ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
  } else {
    ( *insert_scheduled )( &self->Base, thread );
  }
}

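/*
 * The scheduled variant is wired up analogously (again with hypothetical
 * _My_scheduler_* helpers); the additional get_highest_ready operation
 * supplies the candidate which may take over the processor:
 *
 * @code
 * static void _My_scheduler_Enqueue_scheduled_fifo(
 *   Scheduler_Context *context,
 *   Thread_Control *thread
 * )
 * {
 *   _Scheduler_SMP_Enqueue_scheduled_ordered(
 *     context,
 *     thread,
 *     _Scheduler_simple_Insert_priority_fifo_order,
 *     _My_scheduler_Get_highest_ready,
 *     _My_scheduler_Insert_ready_fifo,
 *     _Scheduler_SMP_Insert_scheduled_fifo,
 *     _My_scheduler_Move_from_ready_to_scheduled
 *   );
 * }
 * @endcode
 */
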
static inline void _Scheduler_SMP_Extract_from_scheduled(
  Thread_Control *thread
)
{
  _Chain_Extract_unprotected( &thread->Object.Node );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context *context,
  Thread_Control *victim,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *highest_ready = ( *get_highest_ready )( &self->Base );

  _Scheduler_SMP_Allocate_processor( self, highest_ready, victim );

  ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] extract_from_ready Function to extract a node from the set of
 * ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 * of ready nodes to the set of scheduled nodes.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;

  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( is_scheduled ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );

    _Scheduler_SMP_Schedule_highest_ready(
      context,
      thread,
      get_highest_ready,
      move_from_ready_to_scheduled
    );
  } else {
    ( *extract_from_ready )( context, thread );
  }
}

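/*
 * A usage sketch for the block operation (hypothetical _My_scheduler_*
 * helpers, assuming a scheduler operation signature which takes a
 * Scheduler_Control): either the scheduled set is refilled from the ready
 * set, or the blocked thread is simply removed from the ready set.
 *
 * @code
 * void _My_scheduler_Block(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control *thread
 * )
 * {
 *   _Scheduler_SMP_Block(
 *     _Scheduler_Get_context( scheduler ),
 *     thread,
 *     _My_scheduler_Extract_from_ready,
 *     _My_scheduler_Get_highest_ready,
 *     _My_scheduler_Move_from_ready_to_scheduled
 *   );
 * }
 * @endcode
 */
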
static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_SMP_Enqueue enqueue_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

  ( *enqueue_fifo )( context, thread );
}

static inline void _Scheduler_SMP_Change_priority(
  Scheduler_Context *context,
  Thread_Control *thread,
  Priority_Control new_priority,
  bool prepend_it,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Update update,
  Scheduler_SMP_Enqueue enqueue_fifo,
  Scheduler_SMP_Enqueue enqueue_lifo,
  Scheduler_SMP_Enqueue enqueue_scheduled_fifo,
  Scheduler_SMP_Enqueue enqueue_scheduled_lifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      ( *enqueue_scheduled_lifo )( context, thread );
    } else {
      ( *enqueue_scheduled_fifo )( context, thread );
    }
  } else {
    ( *extract_from_ready )( context, thread );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      ( *enqueue_lifo )( context, thread );
    } else {
      ( *enqueue_fifo )( context, thread );
    }
  }
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &thread->Object.Node,
    _Scheduler_simple_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &thread->Object.Node,
    _Scheduler_simple_Insert_priority_fifo_order
  );
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */