source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ a7a8ec03

Last change on this file was a7a8ec03, checked in by Sebastian Huber <sebastian.huber@…> on 10/12/16 at 07:55:34

score: Protect thread scheduler state changes

Update #2556.

/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013-2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue_ordered(),
 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance starts
 * with an idle thread assigned to it.  Let's have a look at an example with two
 * idle threads I and J with priority 5.  We also have blocked threads A, B and
 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
 * with respect to the thread priority from left to right in the below
 * diagrams.  The highest priority node (lowest priority number) is the
 * leftmost node.  Since the processor assignment is independent of the thread
 * priority, the assignment of threads to processors may change from one
 * diagram to the next.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * now migrated from processor 1 to processor 0 and thread C still executes on
 * processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *node
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter,
  Chain_Node_order   order
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

typedef Thread_Control *( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Thread_Control    *needs_help
);

typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Thread_Control    *scheduled,
  Thread_Control    *victim
);

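/*
 * The following two order predicates differ only in the comparison operator.
 * With "<=" a node to insert is placed before already queued nodes of equal
 * priority (last-in first-out within a priority), with "<" it is placed
 * behind them (first-in first-out within a priority).
 */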
static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority <= node_next->priority;
}

static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority < node_next->priority;
}

static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
  _Chain_Initialize_empty( &self->Idle_threads );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_own_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->state;
}

static inline Priority_Control _Scheduler_SMP_Node_priority(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->priority;
}

static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node      *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

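/*
 * Matrix of the allowed scheduler SMP node state transitions, indexed by the
 * current state (first index) and the requested new state (second index), see
 * _Scheduler_SMP_Node_change_state().
 */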
extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_Node           *node,
  Scheduler_SMP_Node_state  new_state
)
{
  Scheduler_SMP_Node *the_node;

  the_node = _Scheduler_SMP_Node_downcast( node );
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ the_node->state ][ new_state ]
  );

  the_node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->scheduler_context == context;
}

static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
  Scheduler_Context *context
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *idle = (Thread_Control *)
    _Chain_Get_first_unprotected( &self->Idle_threads );

  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );

  return idle;
}

static inline void _Scheduler_SMP_Release_idle_thread(
  Scheduler_Context *context,
  Thread_Control    *idle
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
}

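/*
 * Assigns a processor to the scheduled thread.  If the scheduled thread is
 * already executing on a processor owned by this scheduler instance, it stays
 * there and the previous heir of that processor is moved to the processor of
 * the victim thread instead, so that an unnecessary thread migration is
 * avoided.
 */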
static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread
)
{
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      heir = scheduled_thread;
    }
  } else {
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}

/*
 * This method is slightly different from
 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
 * but does not take into account affinity.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread
)
{
  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Thread_Control                   *victim_thread,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );

  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );

  ( *allocate_processor )( context, scheduled_thread, victim_thread );
}

static inline Thread_Control *_Scheduler_SMP_Preempt(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control   *victim_thread;
  ISR_lock_Context  lock_context;

  victim_thread = _Scheduler_Node_get_user( victim );
  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
  _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
  _Thread_Scheduler_release_critical( victim_thread, &lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    victim_thread,
    allocate_processor
  );

  return victim_thread;
}

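/*
 * The chain of scheduled nodes is maintained in priority order, thus the
 * lowest priority scheduled node is the last node of the chain.  The filter
 * and order parameters are unused here; they only exist to match the
 * Scheduler_SMP_Get_lowest_scheduled interface.
 */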
static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Chain_Control *scheduled = &self->Scheduled;
  Scheduler_Node *lowest_scheduled =
    (Scheduler_Node *) _Chain_Last( scheduled );

  (void) filter;
  (void) order;

  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );

  return lowest_scheduled;
}

static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control *needs_help;
  Scheduler_Try_to_schedule_action action;

  action = _Scheduler_Try_to_schedule_node(
    context,
    node,
    _Scheduler_Node_get_idle( lowest_scheduled ),
    _Scheduler_SMP_Get_idle_thread
  );

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    Thread_Control *lowest_scheduled_user;
    Thread_Control *idle;

    lowest_scheduled_user = _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      allocate_processor
    );

    ( *insert_scheduled )( context, node );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    idle = _Scheduler_Release_idle_thread(
      context,
      lowest_scheduled,
      _Scheduler_SMP_Release_idle_thread
    );
    if ( idle == NULL ) {
      needs_help = lowest_scheduled_user;
    } else {
      needs_help = NULL;
    }
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    _Scheduler_SMP_Node_change_state(
      lowest_scheduled,
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );

    ( *insert_scheduled )( context, node );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Exchange_idle_thread(
      node,
      lowest_scheduled,
      _Scheduler_Node_get_idle( lowest_scheduled )
    );

    needs_help = NULL;
  } else {
    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    needs_help = NULL;
  }

  return needs_help;
}

/**
 * @brief Enqueues a node according to the specified order function.
 *
 * The node must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] needs_help The thread needing help in case the node cannot be
 *   scheduled.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param[in] get_lowest_scheduled Function to select the node from the
 *   scheduled nodes to replace.  It may not be possible to find one; in that
 *   case a pointer must be returned so that the order function returns false
 *   if this pointer is passed as the second argument to the order function.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context                  *context,
  Scheduler_Node                     *node,
  Thread_Control                     *needs_help,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  Scheduler_Node *lowest_scheduled =
    ( *get_lowest_scheduled )( context, node, order );

  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      allocate_processor
    );
  } else {
    ( *insert_ready )( context, node );
  }

  return needs_help;
}
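
/*
 * Illustrative sketch only: a scheduler back end can instantiate the enqueue
 * operation by fixing the function pointers.  The _Scheduler_my_SMP_*
 * operations below are hypothetical back end specific ready set helpers; the
 * remaining functions are provided by this header.
 *
 *   static Thread_Control *_Scheduler_my_SMP_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Thread_Control    *needs_help
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       node,
 *       needs_help,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _Scheduler_my_SMP_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _Scheduler_my_SMP_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */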

/**
 * @brief Enqueues a scheduled node according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] order The order function.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  while ( true ) {
    Scheduler_Node                   *highest_ready;
    Scheduler_Try_to_schedule_action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
      ( *insert_scheduled )( context, node );
      return NULL;
    }

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      _Scheduler_Node_get_idle( node ),
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      Thread_Control *user;
      Thread_Control *idle;

      user = _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        allocate_processor
      );

      ( *insert_ready )( context, node );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      idle = _Scheduler_Release_idle_thread(
        context,
        node,
        _Scheduler_SMP_Release_idle_thread
      );

      if ( idle == NULL ) {
        return user;
      } else {
        return NULL;
      }
    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_SCHEDULED
      );

      ( *insert_ready )( context, node );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      _Scheduler_Exchange_idle_thread(
        highest_ready,
        node,
        _Scheduler_Node_get_idle( node )
      );
      return NULL;
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }
}

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Node *node
)
{
  _Chain_Extract_unprotected( &node->Node );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        _Scheduler_Node_get_user( victim ),
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] node The scheduler node of the thread to block.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Node_state node_state;
  bool                     block;

  node_state = _Scheduler_SMP_Node_state( node );
  _Assert( node_state != SCHEDULER_SMP_NODE_BLOCKED );

  block = _Scheduler_Block_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Get_idle_thread
  );
  if ( block ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Scheduler_SMP_Extract_from_scheduled( node );

      _Scheduler_SMP_Schedule_highest_ready(
        context,
        node,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor
      );
    } else {
      ( *extract_from_ready )( context, node );
    }
  }
}
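
/*
 * Illustrative sketch only: a scheduler back end might wrap
 * _Scheduler_SMP_Block() roughly as follows in its block operation.  The
 * _Scheduler_my_SMP_* helpers are hypothetical back end specific ready set
 * operations; _Scheduler_Get_context() is provided by schedulerimpl.h.
 *
 *   void _Scheduler_my_SMP_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     _Scheduler_SMP_Block(
 *       _Scheduler_Get_context( scheduler ),
 *       thread,
 *       node,
 *       _Scheduler_my_SMP_Extract_from_ready,
 *       _Scheduler_my_SMP_Get_highest_ready,
 *       _Scheduler_my_SMP_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */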

static inline Thread_Control *_Scheduler_SMP_Unblock(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Update   update,
  Scheduler_SMP_Enqueue  enqueue_fifo
)
{
  Scheduler_SMP_Node_state  node_state;
  bool                      unblock;
  Thread_Control           *needs_help;

  node_state = _Scheduler_SMP_Node_state( node );
  unblock = _Scheduler_Unblock_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Release_idle_thread
  );

  if ( unblock ) {
    Priority_Control new_priority;
    bool             prepend_it;

    new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
    (void) prepend_it;

    if ( new_priority != _Scheduler_SMP_Node_priority( node ) ) {
      ( *update )( context, node, new_priority );
    }

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

      needs_help = ( *enqueue_fifo )( context, node, thread );
    } else {
      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
      _Assert(
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( node->idle == NULL );

      if ( node->accepts_help == thread ) {
        needs_help = thread;
      } else {
        needs_help = NULL;
      }
    }
  } else {
    needs_help = NULL;
  }

  return needs_help;
}

static inline Thread_Control *_Scheduler_SMP_Update_priority(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_Node                  *node,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Update             update,
  Scheduler_SMP_Enqueue            enqueue_fifo,
  Scheduler_SMP_Enqueue            enqueue_lifo,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo
)
{
  Thread_Control          *needs_help;
  Priority_Control         new_priority;
  bool                     prepend_it;
  Scheduler_SMP_Node_state node_state;

  new_priority = _Scheduler_Node_get_priority( node, &prepend_it );

  if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
    /* Nothing to do */
    return NULL;
  }

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( node );

    ( *update )( context, node, new_priority );

    if ( prepend_it ) {
      needs_help = ( *enqueue_scheduled_lifo )( context, node );
    } else {
      needs_help = ( *enqueue_scheduled_fifo )( context, node );
    }
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );

    ( *update )( context, node, new_priority );

    if ( prepend_it ) {
      needs_help = ( *enqueue_lifo )( context, node, NULL );
    } else {
      needs_help = ( *enqueue_fifo )( context, node, NULL );
    }
  } else {
    ( *update )( context, node, new_priority );

    needs_help = NULL;
  }

  return needs_help;
}

static inline Thread_Control *_Scheduler_SMP_Ask_for_help_X(
  Scheduler_Context                  *context,
  Thread_Control                     *offers_help,
  Thread_Control                     *needs_help,
  Scheduler_SMP_Enqueue               enqueue_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
  Thread_Control *next_needs_help = NULL;
  Thread_Control *previous_accepts_help;

  previous_accepts_help = node->Base.accepts_help;
  node->Base.accepts_help = needs_help;

  switch ( node->state ) {
    case SCHEDULER_SMP_NODE_READY:
      next_needs_help =
        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
      break;
    case SCHEDULER_SMP_NODE_SCHEDULED:
      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
        context,
        &node->Base,
        offers_help,
        needs_help,
        previous_accepts_help,
        _Scheduler_SMP_Release_idle_thread
      );
      break;
    case SCHEDULER_SMP_NODE_BLOCKED:
      if (
        _Scheduler_Ask_blocked_node_for_help(
          context,
          &node->Base,
          offers_help,
          needs_help
        )
      ) {
        _Scheduler_SMP_Node_change_state(
          &node->Base,
          SCHEDULER_SMP_NODE_READY
        );

        next_needs_help = ( *enqueue_fifo )(
          context,
          &node->Base,
          needs_help
        );
      }
      break;
  }

  return next_needs_help;
}

static inline Thread_Control *_Scheduler_SMP_Yield(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_Node                  *node,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Enqueue            enqueue_fifo,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo
)
{
  Thread_Control *needs_help;

  if ( _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( node );

    needs_help = ( *enqueue_scheduled_fifo )( context, node );
  } else {
    ( *extract_from_ready )( context, node );

    needs_help = ( *enqueue_fifo )( context, node, NULL );
  }

  return needs_help;
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_fifo_order
  );
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */