source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ 29594b4

Last change on this file since 29594b4 was 29594b4, checked in by Sebastian Huber <sebastian.huber@…>, on 09/07/16 at 07:27:59

score: Remove superfluous SMP debug support

This information turned out to be useless in the last couple of months.

File size: 29.8 KB
1/**
2 * @file
3 *
4 * @brief SMP Scheduler Implementation
5 *
6 * @ingroup ScoreSchedulerSMP
7 */
8
9/*
10 * Copyright (c) 2013-2015 embedded brains GmbH.  All rights reserved.
11 *
12 *  embedded brains GmbH
13 *  Dornierstr. 4
14 *  82178 Puchheim
15 *  Germany
16 *  <rtems@embedded-brains.de>
17 *
18 * The license and distribution terms for this file may be
19 * found in the file LICENSE in this distribution or at
20 * http://www.rtems.org/license/LICENSE.
21 */
22
23#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25
26#include <rtems/score/schedulersmp.h>
27#include <rtems/score/assert.h>
28#include <rtems/score/chainimpl.h>
29#include <rtems/score/schedulersimpleimpl.h>
30
31#ifdef __cplusplus
32extern "C" {
33#endif /* __cplusplus */
34
35/**
36 * @addtogroup ScoreSchedulerSMP
37 *
38 * The scheduler nodes can be in three states
39 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
40 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
41 * - @ref SCHEDULER_SMP_NODE_READY.
42 *
43 * State transitions are triggered via basic operations
44 * - _Scheduler_SMP_Enqueue_ordered(),
45 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
46 * - _Scheduler_SMP_Block().
47 *
48 * @dot
49 * digraph {
50 *   node [style="filled"];
51 *
52 *   bs [label="BLOCKED"];
53 *   ss [label="SCHEDULED", fillcolor="green"];
54 *   rs [label="READY", fillcolor="red"];
55 *
56 *   edge [label="enqueue"];
57 *   edge [fontcolor="darkgreen", color="darkgreen"];
58 *
59 *   bs -> ss;
60 *
61 *   edge [fontcolor="red", color="red"];
62 *
63 *   bs -> rs;
64 *
65 *   edge [label="enqueue other"];
66 *
67 *   ss -> rs;
68 *
69 *   edge [label="block"];
70 *   edge [fontcolor="black", color="black"];
71 *
72 *   ss -> bs;
73 *   rs -> bs;
74 *
75 *   edge [label="block other"];
76 *   edge [fontcolor="darkgreen", color="darkgreen"];
77 *
78 *   rs -> ss;
79 * }
80 * @enddot
81 *
82 * During system initialization each processor of the scheduler instance starts
83 * with an idle thread assigned to it.  Let's have a look at an example with two
84 * idle threads I and J with priority 5.  We also have blocked threads A, B and
85 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
86 * with respect to the thread priority from left to right in the below
87 * diagrams.  The highest priority node (lowest priority number) is the
88 * leftmost node.  Since the processor assignment is independent of the thread
89 * priority, the processor assignments may change from one diagram to the next.
90 *
91 * @dot
92 * digraph {
93 *   node [style="filled"];
94 *   edge [dir="none"];
95 *   subgraph {
96 *     rank = same;
97 *
98 *     i [label="I (5)", fillcolor="green"];
99 *     j [label="J (5)", fillcolor="green"];
100 *     a [label="A (1)"];
101 *     b [label="B (2)"];
102 *     c [label="C (3)"];
103 *     i -> j;
104 *   }
105 *
106 *   subgraph {
107 *     rank = same;
108 *
109 *     p0 [label="PROCESSOR 0", shape="box"];
110 *     p1 [label="PROCESSOR 1", shape="box"];
111 *   }
112 *
113 *   i -> p0;
114 *   j -> p1;
115 * }
116 * @enddot
117 *
118 * Let's start A.  For this, an enqueue operation is performed.
119 *
120 * @dot
121 * digraph {
122 *   node [style="filled"];
123 *   edge [dir="none"];
124 *
125 *   subgraph {
126 *     rank = same;
127 *
128 *     i [label="I (5)", fillcolor="green"];
129 *     j [label="J (5)", fillcolor="red"];
130 *     a [label="A (1)", fillcolor="green"];
131 *     b [label="B (2)"];
132 *     c [label="C (3)"];
133 *     a -> i;
134 *   }
135 *
136 *   subgraph {
137 *     rank = same;
138 *
139 *     p0 [label="PROCESSOR 0", shape="box"];
140 *     p1 [label="PROCESSOR 1", shape="box"];
141 *   }
142 *
143 *   i -> p0;
144 *   a -> p1;
145 * }
146 * @enddot
147 *
148 * Let's start C.
149 *
150 * @dot
151 * digraph {
152 *   node [style="filled"];
153 *   edge [dir="none"];
154 *
155 *   subgraph {
156 *     rank = same;
157 *
158 *     a [label="A (1)", fillcolor="green"];
159 *     c [label="C (3)", fillcolor="green"];
160 *     i [label="I (5)", fillcolor="red"];
161 *     j [label="J (5)", fillcolor="red"];
162 *     b [label="B (2)"];
163 *     a -> c;
164 *     i -> j;
165 *   }
166 *
167 *   subgraph {
168 *     rank = same;
169 *
170 *     p0 [label="PROCESSOR 0", shape="box"];
171 *     p1 [label="PROCESSOR 1", shape="box"];
172 *   }
173 *
174 *   c -> p0;
175 *   a -> p1;
176 * }
177 * @enddot
178 *
179 * Let's start B.
180 *
181 * @dot
182 * digraph {
183 *   node [style="filled"];
184 *   edge [dir="none"];
185 *
186 *   subgraph {
187 *     rank = same;
188 *
189 *     a [label="A (1)", fillcolor="green"];
190 *     b [label="B (2)", fillcolor="green"];
191 *     c [label="C (3)", fillcolor="red"];
192 *     i [label="I (5)", fillcolor="red"];
193 *     j [label="J (5)", fillcolor="red"];
194 *     a -> b;
195 *     c -> i -> j;
196 *   }
197 *
198 *   subgraph {
199 *     rank = same;
200 *
201 *     p0 [label="PROCESSOR 0", shape="box"];
202 *     p1 [label="PROCESSOR 1", shape="box"];
203 *   }
204 *
205 *   b -> p0;
206 *   a -> p1;
207 * }
208 * @enddot
209 *
210 * Let's change the priority of thread A to 4.
211 *
212 * @dot
213 * digraph {
214 *   node [style="filled"];
215 *   edge [dir="none"];
216 *
217 *   subgraph {
218 *     rank = same;
219 *
220 *     b [label="B (2)", fillcolor="green"];
221 *     c [label="C (3)", fillcolor="green"];
222 *     a [label="A (4)", fillcolor="red"];
223 *     i [label="I (5)", fillcolor="red"];
224 *     j [label="J (5)", fillcolor="red"];
225 *     b -> c;
226 *     a -> i -> j;
227 *   }
228 *
229 *   subgraph {
230 *     rank = same;
231 *
232 *     p0 [label="PROCESSOR 0", shape="box"];
233 *     p1 [label="PROCESSOR 1", shape="box"];
234 *   }
235 *
236 *   b -> p0;
237 *   c -> p1;
238 * }
239 * @enddot
240 *
241 * Now perform a blocking operation with thread B.  Please note that thread A
242 * now migrated from processor 1 to processor 0 and thread C still executes on
243 * processor 1.
244 *
245 * @dot
246 * digraph {
247 *   node [style="filled"];
248 *   edge [dir="none"];
249 *
250 *   subgraph {
251 *     rank = same;
252 *
253 *     c [label="C (3)", fillcolor="green"];
254 *     a [label="A (4)", fillcolor="green"];
255 *     i [label="I (5)", fillcolor="red"];
256 *     j [label="J (5)", fillcolor="red"];
257 *     b [label="B (2)"];
258 *     c -> a;
259 *     i -> j;
260 *   }
261 *
262 *   subgraph {
263 *     rank = same;
264 *
265 *     p0 [label="PROCESSOR 0", shape="box"];
266 *     p1 [label="PROCESSOR 1", shape="box"];
267 *   }
268 *
269 *   a -> p0;
270 *   c -> p1;
271 * }
272 * @enddot
273 *
274 * @{
275 */
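
/*
 * In the walkthrough above, starting a thread corresponds to an enqueue of
 * its scheduler node via _Scheduler_SMP_Enqueue_ordered(), the priority
 * change corresponds to _Scheduler_SMP_Update_priority() (extract, update and
 * re-enqueue of the node), and the blocking operation corresponds to
 * _Scheduler_SMP_Block(), which hands the vacated processor to the highest
 * priority ready node.
 */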
276
277typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
278  Scheduler_Context *context,
279  Scheduler_Node    *node
280);
281
282typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
283  Scheduler_Context *context,
284  Scheduler_Node    *filter,
285  Chain_Node_order   order
286);
287
288typedef void ( *Scheduler_SMP_Extract )(
289  Scheduler_Context *context,
290  Scheduler_Node    *node_to_extract
291);
292
293typedef void ( *Scheduler_SMP_Insert )(
294  Scheduler_Context *context,
295  Scheduler_Node    *node_to_insert
296);
297
298typedef void ( *Scheduler_SMP_Move )(
299  Scheduler_Context *context,
300  Scheduler_Node    *node_to_move
301);
302
303typedef void ( *Scheduler_SMP_Update )(
304  Scheduler_Context *context,
305  Scheduler_Node    *node_to_update,
306  Priority_Control   new_priority
307);
308
309typedef Thread_Control *( *Scheduler_SMP_Enqueue )(
310  Scheduler_Context *context,
311  Scheduler_Node    *node_to_enqueue,
312  Thread_Control    *needs_help
313);
314
315typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
316  Scheduler_Context *context,
317  Scheduler_Node    *node_to_enqueue
318);
319
320typedef void ( *Scheduler_SMP_Allocate_processor )(
321  Scheduler_Context *context,
322  Thread_Control    *scheduled,
323  Thread_Control    *victim
324);
325
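/*
 * A concrete SMP scheduler wires its ready-set management into the generic
 * operations below through these callbacks.  A minimal sketch of an enqueue
 * operation, assuming hypothetical scheduler-specific helpers
 * _My_SMP_Insert_ready_fifo() and _My_SMP_Move_from_scheduled_to_ready()
 * (everything else is declared in this header):
 *
 *   static Thread_Control *_My_SMP_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Thread_Control    *needs_help
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       node,
 *       needs_help,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _My_SMP_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_SMP_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 *
 * The concrete SMP schedulers shipped with RTEMS, for example the simple SMP
 * scheduler, follow this pattern.
 */
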
326static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
327  const Chain_Node *to_insert,
328  const Chain_Node *next
329)
330{
331  const Scheduler_SMP_Node *node_to_insert =
332    (const Scheduler_SMP_Node *) to_insert;
333  const Scheduler_SMP_Node *node_next =
334    (const Scheduler_SMP_Node *) next;
335
336  return node_to_insert->priority <= node_next->priority;
337}
338
339static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
340  const Chain_Node *to_insert,
341  const Chain_Node *next
342)
343{
344  const Scheduler_SMP_Node *node_to_insert =
345    (const Scheduler_SMP_Node *) to_insert;
346  const Scheduler_SMP_Node *node_next =
347    (const Scheduler_SMP_Node *) next;
348
349  return node_to_insert->priority < node_next->priority;
350}
351
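/*
 * The two order functions above differ only in the comparison operator.  With
 * the LIFO order a newly inserted node is placed before already queued nodes
 * of equal priority, with the FIFO order it is placed after them.  For
 * example, inserting a node of priority 4 into a chain ordered 2, 4, 4, 7
 * yields 2, [4], 4, 4, 7 with the LIFO order and 2, 4, 4, [4], 7 with the
 * FIFO order (a lower number means a higher priority).
 */
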
352static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
353  Scheduler_Context *context
354)
355{
356  return (Scheduler_SMP_Context *) context;
357}
358
359static inline void _Scheduler_SMP_Initialize(
360  Scheduler_SMP_Context *self
361)
362{
363  _Chain_Initialize_empty( &self->Scheduled );
364  _Chain_Initialize_empty( &self->Idle_threads );
365}
366
367static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
368  Thread_Control *thread
369)
370{
371  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
372}
373
374static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
375  Thread_Control *thread
376)
377{
378  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_own_node( thread );
379}
380
381static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
382  Scheduler_Node *node
383)
384{
385  return (Scheduler_SMP_Node *) node;
386}
387
388static inline void _Scheduler_SMP_Node_initialize(
389  Scheduler_SMP_Node *node,
390  Thread_Control     *thread,
391  Priority_Control    priority
392)
393{
394  _Scheduler_Node_do_initialize( &node->Base, thread, priority );
395  node->state = SCHEDULER_SMP_NODE_BLOCKED;
396  node->priority = priority;
397}
398
399static inline void _Scheduler_SMP_Node_update_priority(
400  Scheduler_SMP_Node *node,
401  Priority_Control    new_priority
402)
403{
404  node->priority = new_priority;
405}
406
407extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];
408
409static inline void _Scheduler_SMP_Node_change_state(
410  Scheduler_SMP_Node      *node,
411  Scheduler_SMP_Node_state new_state
412)
413{
414  _Assert(
415    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
416  );
417
418  node->state = new_state;
419}
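
/*
 * The table is indexed by the current node state (first index) and the
 * requested new state (second index).  It is defined in the corresponding
 * implementation file and is expected to permit exactly the transitions shown
 * in the state diagram above.
 */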
420
421static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
422  const Scheduler_Context *context,
423  const Per_CPU_Control   *cpu
424)
425{
426  return cpu->scheduler_context == context;
427}
428
429static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
430  Scheduler_Context *context
431)
432{
433  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
434  Thread_Control *idle = (Thread_Control *)
435    _Chain_Get_first_unprotected( &self->Idle_threads );
436
437  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
438
439  return idle;
440}
441
442static inline void _Scheduler_SMP_Release_idle_thread(
443  Scheduler_Context *context,
444  Thread_Control    *idle
445)
446{
447  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
448
449  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
450}
451
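/*
 * Allocates the processor of the victim thread.  If the scheduled thread
 * already executes on a processor owned by this scheduler instance, its
 * migration is avoided: it stays on its current processor and the previous
 * heir of that processor becomes the heir of the victim's processor instead.
 * Otherwise the scheduled thread itself becomes the heir of the victim's
 * processor.
 */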
452static inline void _Scheduler_SMP_Allocate_processor_lazy(
453  Scheduler_Context *context,
454  Thread_Control    *scheduled_thread,
455  Thread_Control    *victim_thread
456)
457{
458  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
459  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
460  Per_CPU_Control *cpu_self = _Per_CPU_Get();
461  Thread_Control *heir;
462
463  _Assert( _ISR_Get_level() != 0 );
464
465  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
466    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
467      heir = scheduled_cpu->heir;
468      _Thread_Dispatch_update_heir(
469        cpu_self,
470        scheduled_cpu,
471        scheduled_thread
472      );
473    } else {
474      /* We have to force a migration to our processor set */
475      heir = scheduled_thread;
476    }
477  } else {
478    heir = scheduled_thread;
479  }
480
481  if ( heir != victim_thread ) {
482    _Thread_Set_CPU( heir, victim_cpu );
483    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
484  }
485}
486
487/*
488 * This method is slightly different from
489 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
490 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
491 * but does not take into account affinity.
492 */
493static inline void _Scheduler_SMP_Allocate_processor_exact(
494  Scheduler_Context *context,
495  Thread_Control    *scheduled_thread,
496  Thread_Control    *victim_thread
497)
498{
499  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
500  Per_CPU_Control *cpu_self = _Per_CPU_Get();
501
502  (void) context;
503
504  _Thread_Set_CPU( scheduled_thread, victim_cpu );
505  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
506}
507
508static inline void _Scheduler_SMP_Allocate_processor(
509  Scheduler_Context                *context,
510  Scheduler_Node                   *scheduled,
511  Scheduler_Node                   *victim,
512  Scheduler_SMP_Allocate_processor  allocate_processor
513)
514{
515  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
516  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
517
518  _Scheduler_SMP_Node_change_state(
519    _Scheduler_SMP_Node_downcast( scheduled ),
520    SCHEDULER_SMP_NODE_SCHEDULED
521  );
522  _Scheduler_Thread_change_state( scheduled_thread, THREAD_SCHEDULER_SCHEDULED );
523
524  ( *allocate_processor )( context, scheduled_thread, victim_thread );
525}
526
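/*
 * The chain of scheduled nodes contains one node per processor of the
 * scheduler instance and is kept in priority order, so its last node is the
 * scheduled node with the lowest priority.  The filter and order parameters
 * are ignored by this default implementation.
 */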
527static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
528  Scheduler_Context *context,
529  Scheduler_Node    *filter,
530  Chain_Node_order   order
531)
532{
533  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
534  Chain_Control *scheduled = &self->Scheduled;
535  Scheduler_Node *lowest_scheduled =
536    (Scheduler_Node *) _Chain_Last( scheduled );
537
538  (void) filter;
539  (void) order;
540
541  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );
542
543  return lowest_scheduled;
544}
545
546static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
547  Scheduler_Context                *context,
548  Scheduler_Node                   *node,
549  Scheduler_Node                   *lowest_scheduled,
550  Scheduler_SMP_Insert              insert_scheduled,
551  Scheduler_SMP_Move                move_from_scheduled_to_ready,
552  Scheduler_SMP_Allocate_processor  allocate_processor
553)
554{
555  Thread_Control *needs_help;
556  Scheduler_Try_to_schedule_action action;
557
558  action = _Scheduler_Try_to_schedule_node(
559    context,
560    node,
561    _Scheduler_Node_get_idle( lowest_scheduled ),
562    _Scheduler_SMP_Get_idle_thread
563  );
564
565  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
566    Thread_Control *lowest_scheduled_user =
567      _Scheduler_Node_get_user( lowest_scheduled );
568    Thread_Control *idle;
569
570    _Scheduler_SMP_Node_change_state(
571      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
572      SCHEDULER_SMP_NODE_READY
573    );
574    _Scheduler_Thread_change_state(
575      lowest_scheduled_user,
576      THREAD_SCHEDULER_READY
577    );
578
579    _Scheduler_SMP_Allocate_processor(
580      context,
581      node,
582      lowest_scheduled,
583      allocate_processor
584    );
585
586    ( *insert_scheduled )( context, node );
587    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
588
589    idle = _Scheduler_Release_idle_thread(
590      context,
591      lowest_scheduled,
592      _Scheduler_SMP_Release_idle_thread
593    );
594    if ( idle == NULL ) {
595      needs_help = lowest_scheduled_user;
596    } else {
597      needs_help = NULL;
598    }
599  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
600    _Scheduler_SMP_Node_change_state(
601      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
602      SCHEDULER_SMP_NODE_READY
603    );
604    _Scheduler_SMP_Node_change_state(
605      _Scheduler_SMP_Node_downcast( node ),
606      SCHEDULER_SMP_NODE_SCHEDULED
607    );
608
609    ( *insert_scheduled )( context, node );
610    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
611
612    _Scheduler_Exchange_idle_thread(
613      node,
614      lowest_scheduled,
615      _Scheduler_Node_get_idle( lowest_scheduled )
616    );
617
618    needs_help = NULL;
619  } else {
620    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
621    _Scheduler_SMP_Node_change_state(
622      _Scheduler_SMP_Node_downcast( node ),
623      SCHEDULER_SMP_NODE_BLOCKED
624    );
625    needs_help = NULL;
626  }
627
628  return needs_help;
629}
630
631/**
632 * @brief Enqueues a node according to the specified order function.
633 *
634 * The node must not be in the scheduled state.
635 *
636 * @param[in] context The scheduler instance context.
637 * @param[in] node The node to enqueue.
638 * @param[in] needs_help The thread needing help in case the node cannot be
639 *   scheduled.
640 * @param[in] order The order function.
641 * @param[in] insert_ready Function to insert a node into the set of ready
642 *   nodes.
643 * @param[in] insert_scheduled Function to insert a node into the set of
644 *   scheduled nodes.
645 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
646 *   of scheduled nodes to the set of ready nodes.
647 * @param[in] get_lowest_scheduled Function to select the node from the
648 *   scheduled nodes to replace.  It may not be possible to find one; in this
649 *   case a pointer must be returned so that the order function returns false
650 *   if this pointer is passed as the second argument to the order function.
651 * @param[in] allocate_processor Function to allocate a processor to a node
652 *   based on the rules of the scheduler.
653 */
654static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
655  Scheduler_Context                  *context,
656  Scheduler_Node                     *node,
657  Thread_Control                     *needs_help,
658  Chain_Node_order                    order,
659  Scheduler_SMP_Insert                insert_ready,
660  Scheduler_SMP_Insert                insert_scheduled,
661  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
662  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
663  Scheduler_SMP_Allocate_processor    allocate_processor
664)
665{
666  Scheduler_Node *lowest_scheduled =
667    ( *get_lowest_scheduled )( context, node, order );
668
669  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
670    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
671      context,
672      node,
673      lowest_scheduled,
674      insert_scheduled,
675      move_from_scheduled_to_ready,
676      allocate_processor
677    );
678  } else {
679    ( *insert_ready )( context, node );
680  }
681
682  return needs_help;
683}
684
685/**
686 * @brief Enqueues a scheduled node according to the specified order
687 * function.
688 *
689 * @param[in] context The scheduler instance context.
690 * @param[in] node The node to enqueue.
691 * @param[in] order The order function.
692 * @param[in] extract_from_ready Function to extract a node from the set of
693 *   ready nodes.
694 * @param[in] get_highest_ready Function to get the highest ready node.
695 * @param[in] insert_ready Function to insert a node into the set of ready
696 *   nodes.
697 * @param[in] insert_scheduled Function to insert a node into the set of
698 *   scheduled nodes.
699 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
700 *   of ready nodes to the set of scheduled nodes.
701 * @param[in] allocate_processor Function to allocate a processor to a node
702 *   based on the rules of the scheduler.
703 */
704static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
705  Scheduler_Context                *context,
706  Scheduler_Node                   *node,
707  Chain_Node_order                  order,
708  Scheduler_SMP_Extract             extract_from_ready,
709  Scheduler_SMP_Get_highest_ready   get_highest_ready,
710  Scheduler_SMP_Insert              insert_ready,
711  Scheduler_SMP_Insert              insert_scheduled,
712  Scheduler_SMP_Move                move_from_ready_to_scheduled,
713  Scheduler_SMP_Allocate_processor  allocate_processor
714)
715{
716  Thread_Control *needs_help;
717
718  do {
719    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );
720
721    /*
722     * The node has been extracted from the scheduled chain.  We now have to
723     * place it on the scheduled or ready set.
724     */
725    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
726      ( *insert_scheduled )( context, node );
727
728      needs_help = NULL;
729    } else {
730      Scheduler_Try_to_schedule_action action;
731
732      action = _Scheduler_Try_to_schedule_node(
733        context,
734        highest_ready,
735        _Scheduler_Node_get_idle( node ),
736        _Scheduler_SMP_Get_idle_thread
737      );
738
739      if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
740        Thread_Control *user = _Scheduler_Node_get_user( node );
741        Thread_Control *idle;
742
743        _Scheduler_SMP_Node_change_state(
744          _Scheduler_SMP_Node_downcast( node ),
745          SCHEDULER_SMP_NODE_READY
746        );
747        _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
748
749        _Scheduler_SMP_Allocate_processor(
750          context,
751          highest_ready,
752          node,
753          allocate_processor
754        );
755
756        ( *insert_ready )( context, node );
757        ( *move_from_ready_to_scheduled )( context, highest_ready );
758
759        idle = _Scheduler_Release_idle_thread(
760          context,
761          node,
762          _Scheduler_SMP_Release_idle_thread
763        );
764        if ( idle == NULL ) {
765          needs_help = user;
766        } else {
767          needs_help = NULL;
768        }
769      } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
770        _Scheduler_SMP_Node_change_state(
771          _Scheduler_SMP_Node_downcast( node ),
772          SCHEDULER_SMP_NODE_READY
773        );
774        _Scheduler_SMP_Node_change_state(
775          _Scheduler_SMP_Node_downcast( highest_ready ),
776          SCHEDULER_SMP_NODE_SCHEDULED
777        );
778
779        ( *insert_ready )( context, node );
780        ( *move_from_ready_to_scheduled )( context, highest_ready );
781
782        _Scheduler_Exchange_idle_thread(
783          highest_ready,
784          node,
785          _Scheduler_Node_get_idle( node )
786        );
787
788        needs_help = NULL;
789      } else {
790        _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
791
792        _Scheduler_SMP_Node_change_state(
793          _Scheduler_SMP_Node_downcast( highest_ready ),
794          SCHEDULER_SMP_NODE_BLOCKED
795        );
796
797        ( *extract_from_ready )( context, highest_ready );
798
799        continue;
800      }
801    }
802  } while ( false );
803
804  return needs_help;
805}
806
807static inline void _Scheduler_SMP_Extract_from_scheduled(
808  Scheduler_Node *node
809)
810{
811  _Chain_Extract_unprotected( &node->Node );
812}
813
814static inline void _Scheduler_SMP_Schedule_highest_ready(
815  Scheduler_Context                *context,
816  Scheduler_Node                   *victim,
817  Scheduler_SMP_Extract             extract_from_ready,
818  Scheduler_SMP_Get_highest_ready   get_highest_ready,
819  Scheduler_SMP_Move                move_from_ready_to_scheduled,
820  Scheduler_SMP_Allocate_processor  allocate_processor
821)
822{
823  do {
824    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
825    Scheduler_Try_to_schedule_action action;
826
827    action = _Scheduler_Try_to_schedule_node(
828      context,
829      highest_ready,
830      NULL,
831      _Scheduler_SMP_Get_idle_thread
832    );
833
834    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
835      _Scheduler_SMP_Allocate_processor(
836        context,
837        highest_ready,
838        victim,
839        allocate_processor
840      );
841
842      ( *move_from_ready_to_scheduled )( context, highest_ready );
843    } else {
844      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
845
846      _Scheduler_SMP_Node_change_state(
847        _Scheduler_SMP_Node_downcast( highest_ready ),
848        SCHEDULER_SMP_NODE_BLOCKED
849      );
850
851      ( *extract_from_ready )( context, highest_ready );
852
853      continue;
854    }
855  } while ( false );
856}
857
858/**
859 * @brief Blocks a thread.
860 *
861 * @param[in] context The scheduler instance context.
862 * @param[in] thread The thread of the scheduling operation.
863 * @param[in] extract_from_ready Function to extract a node from the set of
864 *   ready nodes.
865 * @param[in] get_highest_ready Function to get the highest ready node.
866 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
867 *   of ready nodes to the set of scheduled nodes.
868 */
869static inline void _Scheduler_SMP_Block(
870  Scheduler_Context                *context,
871  Thread_Control                   *thread,
872  Scheduler_SMP_Extract             extract_from_ready,
873  Scheduler_SMP_Get_highest_ready   get_highest_ready,
874  Scheduler_SMP_Move                move_from_ready_to_scheduled,
875  Scheduler_SMP_Allocate_processor  allocate_processor
876)
877{
878  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
879  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
880  bool block;
881
882  _Assert( is_scheduled || node->state == SCHEDULER_SMP_NODE_READY );
883
884  block = _Scheduler_Block_node(
885    context,
886    thread,
887    &node->Base,
888    is_scheduled,
889    _Scheduler_SMP_Get_idle_thread
890  );
891  if ( block ) {
892    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
893
894    if ( is_scheduled ) {
895      _Scheduler_SMP_Extract_from_scheduled( &node->Base );
896
897      _Scheduler_SMP_Schedule_highest_ready(
898        context,
899        &node->Base,
900        extract_from_ready,
901        get_highest_ready,
902        move_from_ready_to_scheduled,
903        allocate_processor
904      );
905    } else {
906      ( *extract_from_ready )( context, &node->Base );
907    }
908  }
909}
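
/*
 * A concrete scheduler typically forwards its block operation to
 * _Scheduler_SMP_Block() with its own ready-set callbacks.  A minimal sketch,
 * assuming the scheduler context is already at hand and using hypothetical
 * helpers _My_SMP_Extract_from_ready(), _My_SMP_Get_highest_ready() and
 * _My_SMP_Move_from_ready_to_scheduled():
 *
 *   static void _My_SMP_Block( Scheduler_Context *context, Thread_Control *thread )
 *   {
 *     _Scheduler_SMP_Block(
 *       context,
 *       thread,
 *       _My_SMP_Extract_from_ready,
 *       _My_SMP_Get_highest_ready,
 *       _My_SMP_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */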
910
911static inline Thread_Control *_Scheduler_SMP_Unblock(
912  Scheduler_Context     *context,
913  Thread_Control        *thread,
914  Scheduler_SMP_Update   update,
915  Scheduler_SMP_Enqueue  enqueue_fifo
916)
917{
918  Scheduler_SMP_Node *node;
919  bool                is_scheduled;
920  bool                unblock;
921  Thread_Control     *needs_help;
922
923  node = _Scheduler_SMP_Thread_get_node( thread );
924  is_scheduled = ( node->state == SCHEDULER_SMP_NODE_SCHEDULED );
925  unblock = _Scheduler_Unblock_node(
926    context,
927    thread,
928    &node->Base,
929    is_scheduled,
930    _Scheduler_SMP_Release_idle_thread
931  );
932
933  if ( unblock ) {
934    Priority_Control new_priority;
935    bool             prepend_it;
936
937    new_priority = _Scheduler_Node_get_priority( &node->Base, &prepend_it );
938    (void) prepend_it;
939
940    if ( new_priority != node->priority ) {
941      ( *update )( context, &node->Base, new_priority );
942    }
943
944    if ( node->state == SCHEDULER_SMP_NODE_BLOCKED ) {
945      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
946
947      needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
948    } else {
949      _Assert( node->state == SCHEDULER_SMP_NODE_READY );
950      _Assert(
951        node->Base.help_state == SCHEDULER_HELP_ACTIVE_OWNER
952          || node->Base.help_state == SCHEDULER_HELP_ACTIVE_RIVAL
953      );
954      _Assert( node->Base.idle == NULL );
955
956      if ( node->Base.accepts_help == thread ) {
957        needs_help = thread;
958      } else {
959        needs_help = NULL;
960      }
961    }
962  } else {
963    needs_help = NULL;
964  }
965
966  return needs_help;
967}
968
969static inline Thread_Control *_Scheduler_SMP_Update_priority(
970  Scheduler_Context               *context,
971  Thread_Control                  *thread,
972  Scheduler_SMP_Extract            extract_from_ready,
973  Scheduler_SMP_Update             update,
974  Scheduler_SMP_Enqueue            enqueue_fifo,
975  Scheduler_SMP_Enqueue            enqueue_lifo,
976  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo,
977  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo
978)
979{
980  Scheduler_SMP_Node *node;
981  Thread_Control     *needs_help;
982  Priority_Control    new_priority;
983  bool                prepend_it;
984
985  node = _Scheduler_SMP_Thread_get_own_node( thread );
986  new_priority = _Scheduler_Node_get_priority( &node->Base, &prepend_it );
987
988  if ( new_priority == node->priority ) {
989    /* Nothing to do */
990    return NULL;
991  }
992
993  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
994    _Scheduler_SMP_Extract_from_scheduled( &node->Base );
995
996    ( *update )( context, &node->Base, new_priority );
997
998    if ( prepend_it ) {
999      needs_help = ( *enqueue_scheduled_lifo )( context, &node->Base );
1000    } else {
1001      needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
1002    }
1003  } else if ( node->state == SCHEDULER_SMP_NODE_READY ) {
1004    ( *extract_from_ready )( context, &node->Base );
1005
1006    ( *update )( context, &node->Base, new_priority );
1007
1008    if ( prepend_it ) {
1009      needs_help = ( *enqueue_lifo )( context, &node->Base, NULL );
1010    } else {
1011      needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
1012    }
1013  } else {
1014    ( *update )( context, &node->Base, new_priority );
1015
1016    needs_help = NULL;
1017  }
1018
1019  return needs_help;
1020}
1021
1022static inline Thread_Control *_Scheduler_SMP_Ask_for_help(
1023  Scheduler_Context                  *context,
1024  Thread_Control                     *offers_help,
1025  Thread_Control                     *needs_help,
1026  Scheduler_SMP_Enqueue               enqueue_fifo
1027)
1028{
1029  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
1030  Thread_Control *next_needs_help = NULL;
1031  Thread_Control *previous_accepts_help;
1032
1033  previous_accepts_help = node->Base.accepts_help;
1034  node->Base.accepts_help = needs_help;
1035
1036  switch ( node->state ) {
1037    case SCHEDULER_SMP_NODE_READY:
1038      next_needs_help =
1039        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
1040      break;
1041    case SCHEDULER_SMP_NODE_SCHEDULED:
1042      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
1043        context,
1044        &node->Base,
1045        offers_help,
1046        needs_help,
1047        previous_accepts_help,
1048        _Scheduler_SMP_Release_idle_thread
1049      );
1050      break;
1051    case SCHEDULER_SMP_NODE_BLOCKED:
1052      if (
1053        _Scheduler_Ask_blocked_node_for_help(
1054          context,
1055          &node->Base,
1056          offers_help,
1057          needs_help
1058        )
1059      ) {
1060        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
1061
1062        next_needs_help = ( *enqueue_fifo )(
1063          context,
1064          &node->Base,
1065          needs_help
1066        );
1067      }
1068      break;
1069  }
1070
1071  return next_needs_help;
1072}
1073
1074static inline Thread_Control *_Scheduler_SMP_Yield(
1075  Scheduler_Context               *context,
1076  Thread_Control                  *thread,
1077  Scheduler_SMP_Extract            extract_from_ready,
1078  Scheduler_SMP_Enqueue            enqueue_fifo,
1079  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo
1080)
1081{
1082  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
1083  Thread_Control *needs_help;
1084
1085  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1086    _Scheduler_SMP_Extract_from_scheduled( &node->Base );
1087
1088    needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
1089  } else {
1090    ( *extract_from_ready )( context, &node->Base );
1091
1092    needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
1093  }
1094
1095  return needs_help;
1096}
1097
1098static inline void _Scheduler_SMP_Insert_scheduled_lifo(
1099  Scheduler_Context *context,
1100  Scheduler_Node    *node_to_insert
1101)
1102{
1103  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1104
1105  _Chain_Insert_ordered_unprotected(
1106    &self->Scheduled,
1107    &node_to_insert->Node,
1108    _Scheduler_SMP_Insert_priority_lifo_order
1109  );
1110}
1111
1112static inline void _Scheduler_SMP_Insert_scheduled_fifo(
1113  Scheduler_Context *context,
1114  Scheduler_Node    *node_to_insert
1115)
1116{
1117  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1118
1119  _Chain_Insert_ordered_unprotected(
1120    &self->Scheduled,
1121    &node_to_insert->Node,
1122    _Scheduler_SMP_Insert_priority_fifo_order
1123  );
1124}
1125
1126/** @} */
1127
1128#ifdef __cplusplus
1129}
1130#endif /* __cplusplus */
1131
1132#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */