source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ 05ca53d

Last change on this file since 05ca53d was 05ca53d, checked in by Sebastian Huber <sebastian.huber@…>, on 10/31/16 at 12:08:33

rtems: Add scheduler processor add/remove

Update #2797.

/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>
#include <rtems/bspIo.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue_ordered(),
 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance starts
 * with an idle thread assigned to it.  Let's have a look at an example with two
 * idle threads I and J with priority 5.  We also have blocked threads A, B and
 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
 * with respect to the thread priority from left to right in the below
 * diagrams.  The highest priority node (lowest priority number) is the
 * leftmost node.  Since the processor assignment is independent of the thread
 * priority, the processors assigned to the threads may change from one
 * diagram to the next.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A,
 * which previously executed on processor 1, now runs on processor 0, while
 * thread C still executes on processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */

typedef bool ( *Scheduler_SMP_Has_ready )(
  Scheduler_Context *context
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *node
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter,
  Chain_Node_order   order
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

typedef bool ( *Scheduler_SMP_Ask_for_help )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Scheduler_Node    *node
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

typedef bool ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread,
  Per_CPU_Control   *victim_cpu
);

static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority <= node_next->priority;
}

static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority < node_next->priority;
}
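
/*
 * Illustration: for two nodes of equal priority, the LIFO order above yields
 * true, so the node to insert is placed before already queued nodes of the
 * same priority, while the FIFO order yields false, so it is appended after
 * them.  This is a rough sketch only; it assumes the chain node is located at
 * the beginning of the scheduler node, as the casts in these functions imply.
 *
 * @code
 * Scheduler_SMP_Node a;
 * Scheduler_SMP_Node b;
 *
 * a.priority = 2;
 * b.priority = 2;
 *
 * _Assert(
 *   _Scheduler_SMP_Insert_priority_lifo_order(
 *     (const Chain_Node *) &a,
 *     (const Chain_Node *) &b
 *   )
 * );
 * _Assert(
 *   !_Scheduler_SMP_Insert_priority_fifo_order(
 *     (const Chain_Node *) &a,
 *     (const Chain_Node *) &b
 *   )
 * );
 * @endcode
 */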

static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
  _Chain_Initialize_empty( &self->Idle_threads );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->state;
}

static inline Priority_Control _Scheduler_SMP_Node_priority(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->priority;
}

static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node      *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_Node           *node,
  Scheduler_SMP_Node_state  new_state
)
{
  Scheduler_SMP_Node *the_node;

  the_node = _Scheduler_SMP_Node_downcast( node );
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ the_node->state ][ new_state ]
  );

  the_node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->Scheduler.context == context;
}

static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
  Scheduler_Context *context
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *idle = (Thread_Control *)
    _Chain_Get_first_unprotected( &self->Idle_threads );

  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );

  return idle;
}

static inline void _Scheduler_SMP_Release_idle_thread(
  Scheduler_Context *context,
  Thread_Control    *idle
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
}

static inline void _Scheduler_SMP_Extract_idle_thread(
  Thread_Control *idle
)
{
  _Chain_Extract_unprotected( &idle->Object.Node );
}

static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread,
  Per_CPU_Control   *victim_cpu
)
{
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      heir = scheduled_thread;
    }
  } else {
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}

/*
 * This method is slightly different from
 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
 * but does not take into account affinity.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread,
  Per_CPU_Control   *victim_cpu
)
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Thread_Control                   *victim_thread,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );

  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );

  ( *allocate_processor )(
    context,
    scheduled_thread,
    victim_thread,
    victim_cpu
  );
}

static inline Thread_Control *_Scheduler_SMP_Preempt(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control   *victim_thread;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *victim_cpu;

  victim_thread = _Scheduler_Node_get_user( victim );
  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );

  victim_cpu = _Thread_Get_CPU( victim_thread );

  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );

    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
      _Per_CPU_Acquire( victim_cpu );
      _Chain_Append_unprotected(
        &victim_cpu->Threads_in_need_for_help,
        &victim_thread->Scheduler.Help_node
      );
      _Per_CPU_Release( victim_cpu );
    }
  }

  _Thread_Scheduler_release_critical( victim_thread, &lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    victim_thread,
    victim_cpu,
    allocate_processor
  );

  return victim_thread;
}

static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Chain_Control *scheduled = &self->Scheduled;
  Scheduler_Node *lowest_scheduled =
    (Scheduler_Node *) _Chain_Last( scheduled );

  (void) filter;
  (void) order;

  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );
  _Assert(
    _Chain_Next( &lowest_scheduled->Node ) == _Chain_Tail( scheduled )
  );

  return lowest_scheduled;
}

static inline void _Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  action = _Scheduler_Try_to_schedule_node(
    context,
    node,
    _Scheduler_Node_get_idle( lowest_scheduled ),
    _Scheduler_SMP_Get_idle_thread
  );

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      allocate_processor
    );

    ( *insert_scheduled )( context, node );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Release_idle_thread(
      context,
      lowest_scheduled,
      _Scheduler_SMP_Release_idle_thread
    );
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    _Scheduler_SMP_Node_change_state(
      lowest_scheduled,
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );

    ( *insert_scheduled )( context, node );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Exchange_idle_thread(
      node,
      lowest_scheduled,
      _Scheduler_Node_get_idle( lowest_scheduled )
    );
  } else {
    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
  }
}

/**
 * @brief Enqueues a node according to the specified order function.
 *
 * The node must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param[in] get_lowest_scheduled Function to select the node from the
 *   scheduled nodes to replace.  It may not be possible to find one; in this
 *   case a pointer must be returned so that the order function returns false
 *   when this pointer is passed as its second argument.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline bool _Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context                  *context,
  Scheduler_Node                     *node,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  bool            needs_help;
  Scheduler_Node *lowest_scheduled;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node, order );

  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      allocate_processor
    );
    needs_help = false;
  } else {
    ( *insert_ready )( context, node );
    needs_help = true;
  }

  return needs_help;
}
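
/*
 * Illustrative sketch: a scheduler back end usually wraps
 * _Scheduler_SMP_Enqueue_ordered() with a fixed set of callbacks.  The
 * _Scheduler_xyz_SMP_*() names below are placeholders for back end specific
 * operations and are not declared in this file.
 *
 * @code
 * static bool _Scheduler_xyz_SMP_Enqueue_fifo(
 *   Scheduler_Context *context,
 *   Scheduler_Node    *node
 * )
 * {
 *   return _Scheduler_SMP_Enqueue_ordered(
 *     context,
 *     node,
 *     _Scheduler_SMP_Insert_priority_fifo_order,
 *     _Scheduler_xyz_SMP_Insert_ready_fifo,
 *     _Scheduler_SMP_Insert_scheduled_fifo,
 *     _Scheduler_xyz_SMP_Move_from_scheduled_to_ready,
 *     _Scheduler_SMP_Get_lowest_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy
 *   );
 * }
 * @endcode
 */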

/**
 * @brief Enqueues a scheduled node according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] order The order function.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline bool _Scheduler_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  while ( true ) {
    Scheduler_Node                   *highest_ready;
    Scheduler_Try_to_schedule_action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if (
      node->sticky_level > 0
        && ( *order )( &node->Node, &highest_ready->Node )
    ) {
      ( *insert_scheduled )( context, node );

      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
        Thread_Control   *owner;
        ISR_lock_Context  lock_context;

        owner = _Scheduler_Node_get_owner( node );
        _Thread_Scheduler_acquire_critical( owner, &lock_context );

        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
          _Thread_Scheduler_cancel_need_for_help(
            owner,
            _Thread_Get_CPU( owner )
          );
          _Scheduler_Discard_idle_thread(
            context,
            owner,
            node,
            _Scheduler_SMP_Release_idle_thread
          );
          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
        }

        _Thread_Scheduler_release_critical( owner, &lock_context );
      }

      return false;
    }

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      _Scheduler_Node_get_idle( node ),
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      Thread_Control *idle;

      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        allocate_processor
      );

      ( *insert_ready )( context, node );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      idle = _Scheduler_Release_idle_thread(
        context,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      return ( idle == NULL );
    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_SCHEDULED
      );

      ( *insert_ready )( context, node );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      _Scheduler_Exchange_idle_thread(
        highest_ready,
        node,
        _Scheduler_Node_get_idle( node )
      );
      return false;
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }
}

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Node *node
)
{
  _Chain_Extract_unprotected( &node->Node );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        _Scheduler_Node_get_user( victim ),
        victim_cpu,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] node The scheduler node of the thread to block.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Node_state  node_state;
  Per_CPU_Control          *thread_cpu;

  node_state = _Scheduler_SMP_Node_state( node );

  thread_cpu = _Scheduler_Block_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Get_idle_thread
  );

  if ( thread_cpu != NULL ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Scheduler_SMP_Extract_from_scheduled( node );
      _Scheduler_SMP_Schedule_highest_ready(
        context,
        node,
        thread_cpu,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor
      );
    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
      ( *extract_from_ready )( context, node );
    }
  }
}
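
/*
 * Illustrative sketch: a back end block operation typically forwards to
 * _Scheduler_SMP_Block() with its ready set callbacks.  The
 * _Scheduler_xyz_SMP_*() names below are placeholders and not declared in
 * this file.
 *
 * @code
 * void _Scheduler_xyz_SMP_Block(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *thread,
 *   Scheduler_Node          *node
 * )
 * {
 *   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *   _Scheduler_SMP_Block(
 *     context,
 *     thread,
 *     node,
 *     _Scheduler_xyz_SMP_Extract_from_ready,
 *     _Scheduler_xyz_SMP_Get_highest_ready,
 *     _Scheduler_xyz_SMP_Move_from_ready_to_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy
 *   );
 * }
 * @endcode
 */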

static inline bool _Scheduler_SMP_Unblock(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Update   update,
  Scheduler_SMP_Enqueue  enqueue_fifo
)
{
  Scheduler_SMP_Node_state  node_state;
  bool                      unblock;
  bool                      needs_help;

  node_state = _Scheduler_SMP_Node_state( node );
  unblock = _Scheduler_Unblock_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Release_idle_thread
  );

  if ( unblock ) {
    Priority_Control new_priority;
    bool             prepend_it;

    new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
    (void) prepend_it;

    if ( new_priority != _Scheduler_SMP_Node_priority( node ) ) {
      ( *update )( context, node, new_priority );
    }

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

      needs_help = ( *enqueue_fifo )( context, node );
    } else {
      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
      _Assert( node->sticky_level > 0 );
      _Assert( node->idle == NULL );
      needs_help = true;
    }
  } else {
    needs_help = false;
  }

  return needs_help;
}

static inline void _Scheduler_SMP_Update_priority(
  Scheduler_Context          *context,
  Thread_Control             *thread,
  Scheduler_Node             *node,
  Scheduler_SMP_Extract       extract_from_ready,
  Scheduler_SMP_Update        update,
  Scheduler_SMP_Enqueue       enqueue_fifo,
  Scheduler_SMP_Enqueue       enqueue_lifo,
  Scheduler_SMP_Enqueue       enqueue_scheduled_fifo,
  Scheduler_SMP_Enqueue       enqueue_scheduled_lifo,
  Scheduler_SMP_Ask_for_help  ask_for_help
)
{
  Priority_Control         new_priority;
  bool                     prepend_it;
  Scheduler_SMP_Node_state node_state;

  new_priority = _Scheduler_Node_get_priority( node, &prepend_it );

  if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }

    return;
  }

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( node );

    ( *update )( context, node, new_priority );

    if ( prepend_it ) {
      ( *enqueue_scheduled_lifo )( context, node );
    } else {
      ( *enqueue_scheduled_fifo )( context, node );
    }
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );

    ( *update )( context, node, new_priority );

    if ( prepend_it ) {
      ( *enqueue_lifo )( context, node );
    } else {
      ( *enqueue_fifo )( context, node );
    }
  } else {
    ( *update )( context, node, new_priority );

    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }
  }
}

static inline bool _Scheduler_SMP_Yield(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue_fifo,
  Scheduler_SMP_Enqueue  enqueue_scheduled_fifo
)
{
  bool                     needs_help;
  Scheduler_SMP_Node_state node_state;

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( node );

    needs_help = ( *enqueue_scheduled_fifo )( context, node );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );

    needs_help = ( *enqueue_fifo )( context, node );
  } else {
    needs_help = true;
  }

  return needs_help;
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_fifo_order
  );
}

static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Scheduler_Node                     *node,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  Scheduler_Node   *lowest_scheduled;
  ISR_lock_Context  lock_context;
  bool              success;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node, order );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Scheduler_SMP_Node_state node_state;

    node_state = _Scheduler_SMP_Node_state( node );

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
        _Thread_Scheduler_cancel_need_for_help(
          thread,
          _Thread_Get_CPU( thread )
        );
        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
        _Thread_Scheduler_release_critical( thread, &lock_context );

        _Scheduler_SMP_Preempt(
          context,
          node,
          lowest_scheduled,
          allocate_processor
        );

        ( *insert_scheduled )( context, node );
        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

        _Scheduler_Release_idle_thread(
          context,
          lowest_scheduled,
          _Scheduler_SMP_Release_idle_thread
        );
        success = true;
      } else {
        _Thread_Scheduler_release_critical( thread, &lock_context );
        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
        ( *insert_ready )( context, node );
        success = false;
      }
    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Thread_Scheduler_cancel_need_for_help(
        thread,
        _Thread_Get_CPU( thread )
      );
      _Scheduler_Discard_idle_thread(
        context,
        thread,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = true;
    } else {
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = false;
    }
  } else {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}

static inline void _Scheduler_SMP_Reconsider_help_request(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready
)
{
  ISR_lock_Context lock_context;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if (
    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
      && node->sticky_level == 1
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }

  _Thread_Scheduler_release_critical( thread, &lock_context );
}

static inline void _Scheduler_SMP_Withdraw_node(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Thread_Scheduler_state            next_state,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  ISR_lock_Context         lock_context;
  Scheduler_SMP_Node_state node_state;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Per_CPU_Control *thread_cpu;

    thread_cpu = _Thread_Get_CPU( thread );
    _Scheduler_Thread_change_state( thread, next_state );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    _Scheduler_SMP_Extract_from_scheduled( node );
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      thread_cpu,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    ( *extract_from_ready )( context, node );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    _Thread_Scheduler_release_critical( thread, &lock_context );
  }
}

static inline void _Scheduler_SMP_Add_processor(
  Scheduler_Context       *context,
  Thread_Control          *idle,
  Scheduler_SMP_Has_ready  has_ready,
  Scheduler_SMP_Enqueue    enqueue_scheduled_fifo
)
{
  Scheduler_SMP_Context *self;
  Scheduler_Node        *node;

  self = _Scheduler_SMP_Get_self( context );
  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
  node = _Thread_Scheduler_get_home_node( idle );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );

  if ( ( *has_ready )( &self->Base ) ) {
    ( *enqueue_scheduled_fifo )( &self->Base, node );
  } else {
    _Chain_Append_unprotected( &self->Scheduled, &node->Node );
  }
}

static inline Thread_Control *_Scheduler_SMP_Remove_processor(
  Scheduler_Context     *context,
  Per_CPU_Control       *cpu,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue_fifo
)
{
  Scheduler_SMP_Context *self;
  Chain_Node            *chain_node;
  Scheduler_Node        *victim_node;
  Thread_Control        *victim_user;
  Thread_Control        *victim_owner;
  Thread_Control        *idle;

  self = _Scheduler_SMP_Get_self( context );
  chain_node = _Chain_First( &self->Scheduled );

  do {
    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
    victim_node = (Scheduler_Node *) chain_node;
    victim_user = _Scheduler_Node_get_user( victim_node );
    chain_node = _Chain_Next( chain_node );
  } while ( _Thread_Get_CPU( victim_user ) != cpu );

  _Scheduler_SMP_Extract_from_scheduled( victim_node );
  victim_owner = _Scheduler_Node_get_owner( victim_node );

  if ( !victim_owner->is_idle ) {
    Scheduler_Node *idle_node;

    _Scheduler_Release_idle_thread(
      &self->Base,
      victim_node,
      _Scheduler_SMP_Release_idle_thread
    );
    idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
    idle_node = _Thread_Scheduler_get_home_node( idle );
    ( *extract_from_ready )( &self->Base, idle_node );
    _Scheduler_SMP_Preempt(
      &self->Base,
      idle_node,
      victim_node,
      _Scheduler_SMP_Allocate_processor_exact
    );

    if ( !_Chain_Is_empty( &self->Scheduled ) ) {
      ( *enqueue_fifo )( context, victim_node );
    }
  } else {
    _Assert( victim_owner == victim_user );
    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
    idle = victim_owner;
    _Scheduler_SMP_Extract_idle_thread( idle );
  }

  return idle;
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */