source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ d097b546

Last change on this file since d097b546 was d097b546, checked in by Sebastian Huber <sebastian.huber@…>, on 09/21/16 at 13:17:37

score: Rename scheduler ask for help stuff

Rename the scheduler ask for help stuff since this will be replaced step
by step with a second generation of the scheduler helping protocol.
Keep the old one for now in parallel to reduce the patch set sizes.

Update #2556.

  • Property mode set to 100644
File size: 29.7 KB
/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013-2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue_ordered(),
 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance starts
 * with an idle thread assigned to it.  Let's have a look at an example with two
 * idle threads I and J with priority 5.  We also have blocked threads A, B and
 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
 * by thread priority from left to right in the diagrams below.  The highest
 * priority node (lowest priority number) is the leftmost node.  Since the
 * processor assignment is independent of the thread priority, the processor
 * indices may change from one diagram to the next.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * now migrated from processor 1 to processor 0 and thread C still executes on
 * processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */
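
/*
 * A minimal usage sketch of how a scheduler implementation might trigger the
 * state transitions described above.  The _My_SMP_* names are hypothetical
 * placeholders for scheduler-specific callbacks; only the _Scheduler_SMP_*
 * helpers are provided by this file, and _Scheduler_Get_context() comes from
 * <rtems/score/schedulerimpl.h>.
 *
 *   static Thread_Control *_My_SMP_Unblock(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *thread
 *   )
 *   {
 *     Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *     // BLOCKED -> READY, and possibly READY -> SCHEDULED via the enqueue
 *     return _Scheduler_SMP_Unblock(
 *       context,
 *       thread,
 *       _My_SMP_Update,      // hypothetical: update the cached node priority
 *       _My_SMP_Enqueue_fifo // hypothetical: FIFO enqueue into the ready set
 *     );
 *   }
 */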

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *node
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter,
  Chain_Node_order   order
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

typedef Thread_Control *( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Thread_Control    *needs_help
);

typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Thread_Control    *scheduled,
  Thread_Control    *victim
);

static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority <= node_next->priority;
}

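/*
 * Note that the LIFO and FIFO order functions differ only in how they treat
 * equal priorities: the LIFO variant above uses <=, so a node is inserted in
 * front of nodes of the same priority, whereas the FIFO variant below uses <,
 * so it is inserted behind them.
 */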
static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority < node_next->priority;
}

static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
  _Chain_Initialize_empty( &self->Idle_threads );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_own_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node      *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

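/*
 * The valid transitions are checked by _Scheduler_SMP_Node_change_state()
 * below.  A sketch of the table contents consistent with the transition
 * diagram above (the actual definition lives in the scheduler implementation
 * sources and may differ in detail):
 *
 *   const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ] = {
 *     // to:  BLOCKED  SCHEDULED  READY        from:
 *     {       false,   true,      true  },  // BLOCKED
 *     {       true,    false,     true  },  // SCHEDULED
 *     {       true,    true,      false }   // READY
 *   };
 */
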
static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_SMP_Node      *node,
  Scheduler_SMP_Node_state new_state
)
{
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
  );

  node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->scheduler_context == context;
}

static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
  Scheduler_Context *context
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *idle = (Thread_Control *)
    _Chain_Get_first_unprotected( &self->Idle_threads );

  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );

  return idle;
}

static inline void _Scheduler_SMP_Release_idle_thread(
  Scheduler_Context *context,
  Thread_Control    *idle
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
}

static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread
)
{
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      heir = scheduled_thread;
    }
  } else {
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}

/*
 * This method is slightly different from
 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
 * but does not take into account affinity.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread
)
{
  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );

  _Scheduler_SMP_Node_change_state(
    _Scheduler_SMP_Node_downcast( scheduled ),
    SCHEDULER_SMP_NODE_SCHEDULED
  );
  _Scheduler_Thread_change_state( scheduled_thread, THREAD_SCHEDULER_SCHEDULED );

  ( *allocate_processor )( context, scheduled_thread, victim_thread );
}

static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Chain_Control *scheduled = &self->Scheduled;
  Scheduler_Node *lowest_scheduled =
    (Scheduler_Node *) _Chain_Last( scheduled );

  (void) filter;
  (void) order;

  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );

  return lowest_scheduled;
}

static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control *needs_help;
  Scheduler_Try_to_schedule_action action;

  action = _Scheduler_Try_to_schedule_node(
    context,
    node,
    _Scheduler_Node_get_idle( lowest_scheduled ),
    _Scheduler_SMP_Get_idle_thread
  );

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    Thread_Control *lowest_scheduled_user =
      _Scheduler_Node_get_user( lowest_scheduled );
    Thread_Control *idle;

    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_Thread_change_state(
      lowest_scheduled_user,
      THREAD_SCHEDULER_READY
    );

    _Scheduler_SMP_Allocate_processor(
      context,
      node,
      lowest_scheduled,
      allocate_processor
    );

    ( *insert_scheduled )( context, node );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    idle = _Scheduler_Release_idle_thread(
      context,
      lowest_scheduled,
      _Scheduler_SMP_Release_idle_thread
    );
    if ( idle == NULL ) {
      needs_help = lowest_scheduled_user;
    } else {
      needs_help = NULL;
    }
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( node ),
      SCHEDULER_SMP_NODE_SCHEDULED
    );

    ( *insert_scheduled )( context, node );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Exchange_idle_thread(
      node,
      lowest_scheduled,
      _Scheduler_Node_get_idle( lowest_scheduled )
    );

    needs_help = NULL;
  } else {
    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_downcast( node ),
      SCHEDULER_SMP_NODE_BLOCKED
    );
    needs_help = NULL;
  }

  return needs_help;
}

/**
 * @brief Enqueues a node according to the specified order function.
 *
 * The node must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] needs_help The thread needing help in case the node cannot be
 *   scheduled.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param[in] get_lowest_scheduled Function to select the node from the
 *   scheduled nodes to replace.  It may not be possible to find one; in this
 *   case a pointer must be returned so that the order function returns false
 *   if this pointer is passed as the second argument to the order function.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context                  *context,
  Scheduler_Node                     *node,
  Thread_Control                     *needs_help,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  Scheduler_Node *lowest_scheduled =
    ( *get_lowest_scheduled )( context, node, order );

  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      allocate_processor
    );
  } else {
    ( *insert_ready )( context, node );
  }

  return needs_help;
}

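/*
 * A usage sketch, assuming a hypothetical scheduler that provides its own
 * ready-set callbacks (_My_SMP_*); the other arguments are helpers defined in
 * this file.  Such a wrapper would typically be passed as the enqueue_fifo
 * callback of _Scheduler_SMP_Unblock() and _Scheduler_SMP_Update_priority().
 *
 *   static Thread_Control *_My_SMP_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Thread_Control    *needs_help
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       node,
 *       needs_help,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _My_SMP_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_SMP_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */
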
/**
 * @brief Enqueues a scheduled node according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] order The order function.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  while ( true ) {
    Scheduler_Node                   *highest_ready;
    Scheduler_Try_to_schedule_action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
      ( *insert_scheduled )( context, node );
      return NULL;
    }

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      _Scheduler_Node_get_idle( node ),
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      Thread_Control *user = _Scheduler_Node_get_user( node );
      Thread_Control *idle;

      _Scheduler_SMP_Node_change_state(
        _Scheduler_SMP_Node_downcast( node ),
        SCHEDULER_SMP_NODE_READY
      );
      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );

      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        node,
        allocate_processor
      );

      ( *insert_ready )( context, node );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      idle = _Scheduler_Release_idle_thread(
        context,
        node,
        _Scheduler_SMP_Release_idle_thread
      );

      if ( idle == NULL ) {
        return user;
      } else {
        return NULL;
      }
    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
      _Scheduler_SMP_Node_change_state(
        _Scheduler_SMP_Node_downcast( node ),
        SCHEDULER_SMP_NODE_READY
      );
      _Scheduler_SMP_Node_change_state(
        _Scheduler_SMP_Node_downcast( highest_ready ),
        SCHEDULER_SMP_NODE_SCHEDULED
      );

      ( *insert_ready )( context, node );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      _Scheduler_Exchange_idle_thread(
        highest_ready,
        node,
        _Scheduler_Node_get_idle( node )
      );
      return NULL;
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        _Scheduler_SMP_Node_downcast( highest_ready ),
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }
}

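/*
 * A companion sketch to the one after _Scheduler_SMP_Enqueue_ordered(): the
 * scheduled variant is used when a node in the SCHEDULED state has just been
 * extracted from the scheduled chain, for example after a priority change.
 * The _My_SMP_* callbacks are again hypothetical.
 *
 *   static Thread_Control *_My_SMP_Enqueue_scheduled_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_scheduled_ordered(
 *       context,
 *       node,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _My_SMP_Extract_from_ready,
 *       _My_SMP_Get_highest_ready,
 *       _My_SMP_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_SMP_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */
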
static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Node *node
)
{
  _Chain_Extract_unprotected( &node->Node );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        victim,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        _Scheduler_SMP_Node_downcast( highest_ready ),
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
  bool block;

  _Assert( is_scheduled || node->state == SCHEDULER_SMP_NODE_READY );

  block = _Scheduler_Block_node(
    context,
    thread,
    &node->Base,
    is_scheduled,
    _Scheduler_SMP_Get_idle_thread
  );
  if ( block ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    if ( is_scheduled ) {
      _Scheduler_SMP_Extract_from_scheduled( &node->Base );

      _Scheduler_SMP_Schedule_highest_ready(
        context,
        &node->Base,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor
      );
    } else {
      ( *extract_from_ready )( context, &node->Base );
    }
  }
}

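/*
 * A minimal sketch of a Block scheduler operation built on top of
 * _Scheduler_SMP_Block(); the _My_SMP_* callbacks are hypothetical and
 * _Scheduler_Get_context() comes from <rtems/score/schedulerimpl.h>.
 *
 *   static void _My_SMP_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *thread
 *   )
 *   {
 *     _Scheduler_SMP_Block(
 *       _Scheduler_Get_context( scheduler ),
 *       thread,
 *       _My_SMP_Extract_from_ready,
 *       _My_SMP_Get_highest_ready,
 *       _My_SMP_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */
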
static inline Thread_Control *_Scheduler_SMP_Unblock(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_SMP_Update   update,
  Scheduler_SMP_Enqueue  enqueue_fifo
)
{
  Scheduler_SMP_Node *node;
  bool                is_scheduled;
  bool                unblock;
  Thread_Control     *needs_help;

  node = _Scheduler_SMP_Thread_get_node( thread );
  is_scheduled = ( node->state == SCHEDULER_SMP_NODE_SCHEDULED );
  unblock = _Scheduler_Unblock_node(
    context,
    thread,
    &node->Base,
    is_scheduled,
    _Scheduler_SMP_Release_idle_thread
  );

  if ( unblock ) {
    Priority_Control new_priority;
    bool             prepend_it;

    new_priority = _Scheduler_Node_get_priority( &node->Base, &prepend_it );
    (void) prepend_it;

    if ( new_priority != node->priority ) {
      ( *update )( context, &node->Base, new_priority );
    }

    if ( node->state == SCHEDULER_SMP_NODE_BLOCKED ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

      needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
    } else {
      _Assert( node->state == SCHEDULER_SMP_NODE_READY );
      _Assert(
        node->Base.help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->Base.help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( node->Base.idle == NULL );

      if ( node->Base.accepts_help == thread ) {
        needs_help = thread;
      } else {
        needs_help = NULL;
      }
    }
  } else {
    needs_help = NULL;
  }

  return needs_help;
}

static inline Thread_Control *_Scheduler_SMP_Update_priority(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Update             update,
  Scheduler_SMP_Enqueue            enqueue_fifo,
  Scheduler_SMP_Enqueue            enqueue_lifo,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo
)
{
  Scheduler_SMP_Node *node;
  Thread_Control     *needs_help;
  Priority_Control    new_priority;
  bool                prepend_it;

  node = _Scheduler_SMP_Thread_get_own_node( thread );
  new_priority = _Scheduler_Node_get_priority( &node->Base, &prepend_it );

  if ( new_priority == node->priority ) {
    /* Nothing to do */
    return NULL;
  }

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( &node->Base );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      needs_help = ( *enqueue_scheduled_lifo )( context, &node->Base );
    } else {
      needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
    }
  } else if ( node->state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, &node->Base );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      needs_help = ( *enqueue_lifo )( context, &node->Base, NULL );
    } else {
      needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
    }
  } else {
    ( *update )( context, &node->Base, new_priority );

    needs_help = NULL;
  }

  return needs_help;
}

static inline Thread_Control *_Scheduler_SMP_Ask_for_help_X(
  Scheduler_Context                  *context,
  Thread_Control                     *offers_help,
  Thread_Control                     *needs_help,
  Scheduler_SMP_Enqueue               enqueue_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
  Thread_Control *next_needs_help = NULL;
  Thread_Control *previous_accepts_help;

  previous_accepts_help = node->Base.accepts_help;
  node->Base.accepts_help = needs_help;

  switch ( node->state ) {
    case SCHEDULER_SMP_NODE_READY:
      next_needs_help =
        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
      break;
    case SCHEDULER_SMP_NODE_SCHEDULED:
      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
        context,
        &node->Base,
        offers_help,
        needs_help,
        previous_accepts_help,
        _Scheduler_SMP_Release_idle_thread
      );
      break;
    case SCHEDULER_SMP_NODE_BLOCKED:
      if (
        _Scheduler_Ask_blocked_node_for_help(
          context,
          &node->Base,
          offers_help,
          needs_help
        )
      ) {
        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

        next_needs_help = ( *enqueue_fifo )(
          context,
          &node->Base,
          needs_help
        );
      }
      break;
  }

  return next_needs_help;
}

static inline Thread_Control *_Scheduler_SMP_Yield(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Enqueue            enqueue_fifo,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
  Thread_Control *needs_help;

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( &node->Base );

    needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
  } else {
    ( *extract_from_ready )( context, &node->Base );

    needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
  }

  return needs_help;
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_fifo_order
  );
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */