source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ d19dc071

Last change on this file since d19dc071 was d19dc071, checked in by Sebastian Huber <sebastian.huber@…>, on 07/07/17 at 07:40:06

score: Pass scheduler nodes to processor allocator

This allows scheduler implementations to easily access
scheduler-specific data.

Update #3059.

1/**
2 * @file
3 *
4 * @brief SMP Scheduler Implementation
5 *
6 * @ingroup ScoreSchedulerSMP
7 */
8
9/*
10 * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
11 *
12 *  embedded brains GmbH
13 *  Dornierstr. 4
14 *  82178 Puchheim
15 *  Germany
16 *  <rtems@embedded-brains.de>
17 *
18 * The license and distribution terms for this file may be
19 * found in the file LICENSE in this distribution or at
20 * http://www.rtems.org/license/LICENSE.
21 */
22
23#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25
26#include <rtems/score/schedulersmp.h>
27#include <rtems/score/assert.h>
28#include <rtems/score/chainimpl.h>
29#include <rtems/score/schedulersimpleimpl.h>
30#include <rtems/bspIo.h>
31
32#ifdef __cplusplus
33extern "C" {
34#endif /* __cplusplus */
35
36/**
37 * @addtogroup ScoreSchedulerSMP
38 *
39 * The scheduler nodes can be in three states
40 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
41 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
42 * - @ref SCHEDULER_SMP_NODE_READY.
43 *
44 * State transitions are triggered via basic operations
45 * - _Scheduler_SMP_Enqueue_ordered(),
46 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
47 * - _Scheduler_SMP_Block().
48 *
49 * @dot
50 * digraph {
51 *   node [style="filled"];
52 *
53 *   bs [label="BLOCKED"];
54 *   ss [label="SCHEDULED", fillcolor="green"];
55 *   rs [label="READY", fillcolor="red"];
56 *
57 *   edge [label="enqueue"];
58 *   edge [fontcolor="darkgreen", color="darkgreen"];
59 *
60 *   bs -> ss;
61 *
62 *   edge [fontcolor="red", color="red"];
63 *
64 *   bs -> rs;
65 *
66 *   edge [label="enqueue other"];
67 *
68 *   ss -> rs;
69 *
70 *   edge [label="block"];
71 *   edge [fontcolor="black", color="black"];
72 *
73 *   ss -> bs;
74 *   rs -> bs;
75 *
76 *   edge [label="block other"];
77 *   edge [fontcolor="darkgreen", color="darkgreen"];
78 *
79 *   rs -> ss;
80 * }
81 * @enddot
82 *
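 * A node's current state can be queried with _Scheduler_SMP_Node_state() and
 * changed with _Scheduler_SMP_Node_change_state(), both defined below.  The
 * following is a purely illustrative sketch (real transitions are performed by
 * the enqueue and block operations; node is assumed to be a Scheduler_Node
 * pointer):
 *
 * @code
 * if ( _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY ) {
 *   _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
 * }
 * @endcode
 *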
83 * During system initialization each processor of the scheduler instance starts
84 * with an idle thread assigned to it.  Let's have a look at an example with two
85 * idle threads I and J with priority 5.  We also have blocked threads A, B and
86 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
87 * with respect to the thread priority from left to right in the below
88 * diagrams.  The highest priority node (lowest priority number) is the
89 * leftmost node.  Since the processor assignment is independent of the thread
90 * priority, the processor indices may move from one diagram to the next.
91 *
92 * @dot
93 * digraph {
94 *   node [style="filled"];
95 *   edge [dir="none"];
96 *   subgraph {
97 *     rank = same;
98 *
99 *     i [label="I (5)", fillcolor="green"];
100 *     j [label="J (5)", fillcolor="green"];
101 *     a [label="A (1)"];
102 *     b [label="B (2)"];
103 *     c [label="C (3)"];
104 *     i -> j;
105 *   }
106 *
107 *   subgraph {
108 *     rank = same;
109 *
110 *     p0 [label="PROCESSOR 0", shape="box"];
111 *     p1 [label="PROCESSOR 1", shape="box"];
112 *   }
113 *
114 *   i -> p0;
115 *   j -> p1;
116 * }
117 * @enddot
118 *
119 * Let's start A.  For this, an enqueue operation is performed.
120 *
121 * @dot
122 * digraph {
123 *   node [style="filled"];
124 *   edge [dir="none"];
125 *
126 *   subgraph {
127 *     rank = same;
128 *
129 *     i [label="I (5)", fillcolor="green"];
130 *     j [label="J (5)", fillcolor="red"];
131 *     a [label="A (1)", fillcolor="green"];
132 *     b [label="B (2)"];
133 *     c [label="C (3)"];
134 *     a -> i;
135 *   }
136 *
137 *   subgraph {
138 *     rank = same;
139 *
140 *     p0 [label="PROCESSOR 0", shape="box"];
141 *     p1 [label="PROCESSOR 1", shape="box"];
142 *   }
143 *
144 *   i -> p0;
145 *   a -> p1;
146 * }
147 * @enddot
148 *
149 * Let's start C.
150 *
151 * @dot
152 * digraph {
153 *   node [style="filled"];
154 *   edge [dir="none"];
155 *
156 *   subgraph {
157 *     rank = same;
158 *
159 *     a [label="A (1)", fillcolor="green"];
160 *     c [label="C (3)", fillcolor="green"];
161 *     i [label="I (5)", fillcolor="red"];
162 *     j [label="J (5)", fillcolor="red"];
163 *     b [label="B (2)"];
164 *     a -> c;
165 *     i -> j;
166 *   }
167 *
168 *   subgraph {
169 *     rank = same;
170 *
171 *     p0 [label="PROCESSOR 0", shape="box"];
172 *     p1 [label="PROCESSOR 1", shape="box"];
173 *   }
174 *
175 *   c -> p0;
176 *   a -> p1;
177 * }
178 * @enddot
179 *
180 * Let's start B.
181 *
182 * @dot
183 * digraph {
184 *   node [style="filled"];
185 *   edge [dir="none"];
186 *
187 *   subgraph {
188 *     rank = same;
189 *
190 *     a [label="A (1)", fillcolor="green"];
191 *     b [label="B (2)", fillcolor="green"];
192 *     c [label="C (3)", fillcolor="red"];
193 *     i [label="I (5)", fillcolor="red"];
194 *     j [label="J (5)", fillcolor="red"];
195 *     a -> b;
196 *     c -> i -> j;
197 *   }
198 *
199 *   subgraph {
200 *     rank = same;
201 *
202 *     p0 [label="PROCESSOR 0", shape="box"];
203 *     p1 [label="PROCESSOR 1", shape="box"];
204 *   }
205 *
206 *   b -> p0;
207 *   a -> p1;
208 * }
209 * @enddot
210 *
211 * Let's change the priority of thread A to 4.
212 *
213 * @dot
214 * digraph {
215 *   node [style="filled"];
216 *   edge [dir="none"];
217 *
218 *   subgraph {
219 *     rank = same;
220 *
221 *     b [label="B (2)", fillcolor="green"];
222 *     c [label="C (3)", fillcolor="green"];
223 *     a [label="A (4)", fillcolor="red"];
224 *     i [label="I (5)", fillcolor="red"];
225 *     j [label="J (5)", fillcolor="red"];
226 *     b -> c;
227 *     a -> i -> j;
228 *   }
229 *
230 *   subgraph {
231 *     rank = same;
232 *
233 *     p0 [label="PROCESSOR 0", shape="box"];
234 *     p1 [label="PROCESSOR 1", shape="box"];
235 *   }
236 *
237 *   b -> p0;
238 *   c -> p1;
239 * }
240 * @enddot
241 *
242 * Now perform a blocking operation with thread B.  Please note that thread A
243 * now migrated from processor 1 to processor 0 and thread C still executes on
244 * processor 1.
245 *
246 * @dot
247 * digraph {
248 *   node [style="filled"];
249 *   edge [dir="none"];
250 *
251 *   subgraph {
252 *     rank = same;
253 *
254 *     c [label="C (3)", fillcolor="green"];
255 *     a [label="A (4)", fillcolor="green"];
256 *     i [label="I (5)", fillcolor="red"];
257 *     j [label="J (5)", fillcolor="red"];
258 *     b [label="B (2)"];
259 *     c -> a;
260 *     i -> j;
261 *   }
262 *
263 *   subgraph {
264 *     rank = same;
265 *
266 *     p0 [label="PROCESSOR 0", shape="box"];
267 *     p1 [label="PROCESSOR 1", shape="box"];
268 *   }
269 *
270 *   a -> p0;
271 *   c -> p1;
272 * }
273 * @enddot
274 *
275 * @{
276 */
277
278typedef bool ( *Scheduler_SMP_Has_ready )(
279  Scheduler_Context *context
280);
281
282typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
283  Scheduler_Context *context,
284  Scheduler_Node    *node
285);
286
287typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
288  Scheduler_Context *context,
289  Scheduler_Node    *filter,
290  Chain_Node_order   order
291);
292
293typedef void ( *Scheduler_SMP_Extract )(
294  Scheduler_Context *context,
295  Scheduler_Node    *node_to_extract
296);
297
298typedef void ( *Scheduler_SMP_Insert )(
299  Scheduler_Context *context,
300  Scheduler_Node    *node_to_insert
301);
302
303typedef void ( *Scheduler_SMP_Move )(
304  Scheduler_Context *context,
305  Scheduler_Node    *node_to_move
306);
307
308typedef bool ( *Scheduler_SMP_Ask_for_help )(
309  Scheduler_Context *context,
310  Thread_Control    *thread,
311  Scheduler_Node    *node
312);
313
314typedef void ( *Scheduler_SMP_Update )(
315  Scheduler_Context *context,
316  Scheduler_Node    *node_to_update,
317  Priority_Control   new_priority
318);
319
320typedef bool ( *Scheduler_SMP_Enqueue )(
321  Scheduler_Context *context,
322  Scheduler_Node    *node_to_enqueue
323);
324
325typedef void ( *Scheduler_SMP_Allocate_processor )(
326  Scheduler_Context *context,
327  Scheduler_Node    *scheduled,
328  Scheduler_Node    *victim,
329  Per_CPU_Control   *victim_cpu
330);
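
/*
 * The typedefs above form the hook interface which a concrete SMP scheduler
 * backend plugs into the generic operations of this file.  The following is a
 * minimal, hypothetical sketch of one such hook: a get-highest-ready callback
 * for a backend that keeps its ready nodes in a single priority-ordered chain.
 * The context type, its Ready member and the function name are assumptions for
 * illustration only and are not part of this header.
 *
 * @code
 * typedef struct {
 *   Scheduler_SMP_Context Base;
 *   Chain_Control         Ready;    // ready nodes, ordered by priority
 * } Scheduler_example_SMP_Context;
 *
 * static Scheduler_Node *_Scheduler_example_SMP_Get_highest_ready(
 *   Scheduler_Context *context,
 *   Scheduler_Node    *node
 * )
 * {
 *   Scheduler_example_SMP_Context *self =
 *     (Scheduler_example_SMP_Context *) context;
 *
 *   (void) node;
 *
 *   // The first node of the priority-ordered chain is the highest ready node.
 *   return (Scheduler_Node *) _Chain_First( &self->Ready );
 * }
 * @endcode
 */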
331
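/*
 * The following two order functions differ only in how they break ties between
 * nodes of equal priority: the LIFO variant uses <=, so a newly inserted node
 * is placed before existing nodes of the same priority, while the FIFO variant
 * uses <, so it is placed after them.
 */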
332static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
333  const Chain_Node *to_insert,
334  const Chain_Node *next
335)
336{
337  const Scheduler_SMP_Node *node_to_insert =
338    (const Scheduler_SMP_Node *) to_insert;
339  const Scheduler_SMP_Node *node_next =
340    (const Scheduler_SMP_Node *) next;
341
342  return node_to_insert->priority <= node_next->priority;
343}
344
345static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
346  const Chain_Node *to_insert,
347  const Chain_Node *next
348)
349{
350  const Scheduler_SMP_Node *node_to_insert =
351    (const Scheduler_SMP_Node *) to_insert;
352  const Scheduler_SMP_Node *node_next =
353    (const Scheduler_SMP_Node *) next;
354
355  return node_to_insert->priority < node_next->priority;
356}
357
358static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
359  Scheduler_Context *context
360)
361{
362  return (Scheduler_SMP_Context *) context;
363}
364
365static inline void _Scheduler_SMP_Initialize(
366  Scheduler_SMP_Context *self
367)
368{
369  _Chain_Initialize_empty( &self->Scheduled );
370  _Chain_Initialize_empty( &self->Idle_threads );
371}
372
373static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
374  Thread_Control *thread
375)
376{
377  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
378}
379
380static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
381  Thread_Control *thread
382)
383{
384  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
385}
386
387static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
388  Scheduler_Node *node
389)
390{
391  return (Scheduler_SMP_Node *) node;
392}
393
394static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
395  const Scheduler_Node *node
396)
397{
398  return ( (const Scheduler_SMP_Node *) node )->state;
399}
400
401static inline Priority_Control _Scheduler_SMP_Node_priority(
402  const Scheduler_Node *node
403)
404{
405  return ( (const Scheduler_SMP_Node *) node )->priority;
406}
407
408static inline void _Scheduler_SMP_Node_initialize(
409  const Scheduler_Control *scheduler,
410  Scheduler_SMP_Node      *node,
411  Thread_Control          *thread,
412  Priority_Control         priority
413)
414{
415  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
416  node->state = SCHEDULER_SMP_NODE_BLOCKED;
417  node->priority = priority;
418}
419
420static inline void _Scheduler_SMP_Node_update_priority(
421  Scheduler_SMP_Node *node,
422  Priority_Control    new_priority
423)
424{
425  node->priority = new_priority;
426}
427
428static inline void _Scheduler_SMP_Node_change_state(
429  Scheduler_Node           *node,
430  Scheduler_SMP_Node_state  new_state
431)
432{
433  Scheduler_SMP_Node *the_node;
434
435  the_node = _Scheduler_SMP_Node_downcast( node );
436  the_node->state = new_state;
437}
438
439static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
440  const Scheduler_Context *context,
441  const Per_CPU_Control   *cpu
442)
443{
444  return cpu->Scheduler.context == context;
445}
446
447static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
448  Scheduler_Context *context
449)
450{
451  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
452  Thread_Control *idle = (Thread_Control *)
453    _Chain_Get_first_unprotected( &self->Idle_threads );
454
455  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
456
457  return idle;
458}
459
460static inline void _Scheduler_SMP_Release_idle_thread(
461  Scheduler_Context *context,
462  Thread_Control    *idle
463)
464{
465  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
466
467  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
468}
469
470static inline void _Scheduler_SMP_Extract_idle_thread(
471  Thread_Control *idle
472)
473{
474  _Chain_Extract_unprotected( &idle->Object.Node );
475}
476
477static inline void _Scheduler_SMP_Allocate_processor_lazy(
478  Scheduler_Context *context,
479  Scheduler_Node    *scheduled,
480  Scheduler_Node    *victim,
481  Per_CPU_Control   *victim_cpu
482)
483{
484  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
485  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
486  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
487  Per_CPU_Control *cpu_self = _Per_CPU_Get();
488  Thread_Control *heir;
489
490  _Assert( _ISR_Get_level() != 0 );
491
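  /*
   * Determine the heir for the victim processor: if the scheduled thread
   * already executes on one of our processors, keep it there and hand the
   * previous heir of that processor over to the victim processor; otherwise
   * the scheduled thread itself migrates to the victim processor.
   */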
492  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
493    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
494      heir = scheduled_cpu->heir;
495      _Thread_Dispatch_update_heir(
496        cpu_self,
497        scheduled_cpu,
498        scheduled_thread
499      );
500    } else {
501      /* We have to force a migration to our processor set */
502      heir = scheduled_thread;
503    }
504  } else {
505    heir = scheduled_thread;
506  }
507
508  if ( heir != victim_thread ) {
509    _Thread_Set_CPU( heir, victim_cpu );
510    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
511  }
512}
513
514/*
515 * This method differs from _Scheduler_SMP_Allocate_processor_lazy() in that
516 * it unconditionally assigns the scheduled thread to the victim processor.
517 * _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
518 * but does not take processor affinity into account.
519 */
520static inline void _Scheduler_SMP_Allocate_processor_exact(
521  Scheduler_Context *context,
522  Scheduler_Node    *scheduled,
523  Scheduler_Node    *victim,
524  Per_CPU_Control   *victim_cpu
525)
526{
527  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
528  Per_CPU_Control *cpu_self = _Per_CPU_Get();
529
530  (void) context;
531  (void) victim;
532
533  _Thread_Set_CPU( scheduled_thread, victim_cpu );
534  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
535}
536
537static inline void _Scheduler_SMP_Allocate_processor(
538  Scheduler_Context                *context,
539  Scheduler_Node                   *scheduled,
540  Scheduler_Node                   *victim,
541  Per_CPU_Control                  *victim_cpu,
542  Scheduler_SMP_Allocate_processor  allocate_processor
543)
544{
545  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
546  ( *allocate_processor )( context, scheduled, victim, victim_cpu );
547}
548
549static inline Thread_Control *_Scheduler_SMP_Preempt(
550  Scheduler_Context                *context,
551  Scheduler_Node                   *scheduled,
552  Scheduler_Node                   *victim,
553  Scheduler_SMP_Allocate_processor  allocate_processor
554)
555{
556  Thread_Control   *victim_thread;
557  ISR_lock_Context  lock_context;
558  Per_CPU_Control  *victim_cpu;
559
560  victim_thread = _Scheduler_Node_get_user( victim );
561  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
562
563  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
564
565  victim_cpu = _Thread_Get_CPU( victim_thread );
566
567  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
568    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
569
570    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
571      _Per_CPU_Acquire( victim_cpu );
572      _Chain_Append_unprotected(
573        &victim_cpu->Threads_in_need_for_help,
574        &victim_thread->Scheduler.Help_node
575      );
576      _Per_CPU_Release( victim_cpu );
577    }
578  }
579
580  _Thread_Scheduler_release_critical( victim_thread, &lock_context );
581
582  _Scheduler_SMP_Allocate_processor(
583    context,
584    scheduled,
585    victim,
586    victim_cpu,
587    allocate_processor
588  );
589
590  return victim_thread;
591}
592
593static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
594  Scheduler_Context *context,
595  Scheduler_Node    *filter,
596  Chain_Node_order   order
597)
598{
599  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
600  Chain_Control *scheduled = &self->Scheduled;
601  Scheduler_Node *lowest_scheduled =
602    (Scheduler_Node *) _Chain_Last( scheduled );
603
604  (void) filter;
605  (void) order;
606
607  _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
608  _Assert(
609    _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
610  );
611
612  return lowest_scheduled;
613}
614
615static inline void _Scheduler_SMP_Enqueue_to_scheduled(
616  Scheduler_Context                *context,
617  Scheduler_Node                   *node,
618  Scheduler_Node                   *lowest_scheduled,
619  Scheduler_SMP_Insert              insert_scheduled,
620  Scheduler_SMP_Move                move_from_scheduled_to_ready,
621  Scheduler_SMP_Allocate_processor  allocate_processor
622)
623{
624  Scheduler_Try_to_schedule_action action;
625
626  action = _Scheduler_Try_to_schedule_node(
627    context,
628    node,
629    _Scheduler_Node_get_idle( lowest_scheduled ),
630    _Scheduler_SMP_Get_idle_thread
631  );
632
633  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
634    _Scheduler_SMP_Preempt(
635      context,
636      node,
637      lowest_scheduled,
638      allocate_processor
639    );
640
641    ( *insert_scheduled )( context, node );
642    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
643
644    _Scheduler_Release_idle_thread(
645      context,
646      lowest_scheduled,
647      _Scheduler_SMP_Release_idle_thread
648    );
649  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
650    _Scheduler_SMP_Node_change_state(
651      lowest_scheduled,
652      SCHEDULER_SMP_NODE_READY
653    );
654    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
655
656    ( *insert_scheduled )( context, node );
657    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
658
659    _Scheduler_Exchange_idle_thread(
660      node,
661      lowest_scheduled,
662      _Scheduler_Node_get_idle( lowest_scheduled )
663    );
664  } else {
665    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
666    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
667  }
668}
669
670/**
671 * @brief Enqueues a node according to the specified order function.
672 *
673 * The node must not be in the scheduled state.
674 *
675 * @param[in] context The scheduler instance context.
676 * @param[in] node The node to enqueue.
677 * @param[in] order The order function.
678 * @param[in] insert_ready Function to insert a node into the set of ready
679 *   nodes.
680 * @param[in] insert_scheduled Function to insert a node into the set of
681 *   scheduled nodes.
682 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
683 *   of scheduled nodes to the set of ready nodes.
684 * @param[in] get_lowest_scheduled Function to select the node from the
685 *   scheduled nodes to replace.  It may not be possible to find one; in this
686 *   case a pointer must be returned so that the order function returns false
687 *   if this pointer is passed as the second argument to the order function.
688 * @param[in] allocate_processor Function to allocate a processor to a node
689 *   based on the rules of the scheduler.
690 */
691static inline bool _Scheduler_SMP_Enqueue_ordered(
692  Scheduler_Context                  *context,
693  Scheduler_Node                     *node,
694  Chain_Node_order                    order,
695  Scheduler_SMP_Insert                insert_ready,
696  Scheduler_SMP_Insert                insert_scheduled,
697  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
698  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
699  Scheduler_SMP_Allocate_processor    allocate_processor
700)
701{
702  bool            needs_help;
703  Scheduler_Node *lowest_scheduled;
704
705  lowest_scheduled = ( *get_lowest_scheduled )( context, node, order );
706
707  if ( ( *order )( &node->Node.Chain, &lowest_scheduled->Node.Chain ) ) {
708    _Scheduler_SMP_Enqueue_to_scheduled(
709      context,
710      node,
711      lowest_scheduled,
712      insert_scheduled,
713      move_from_scheduled_to_ready,
714      allocate_processor
715    );
716    needs_help = false;
717  } else {
718    ( *insert_ready )( context, node );
719    needs_help = true;
720  }
721
722  return needs_help;
723}
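
/*
 * A backend typically wires its ready-queue helpers into the generic enqueue
 * operation above.  The following is a hypothetical sketch; the
 * _Scheduler_example_SMP_* helpers are assumptions for illustration and not
 * part of this header.
 *
 * @code
 * static bool _Scheduler_example_SMP_Enqueue_fifo(
 *   Scheduler_Context *context,
 *   Scheduler_Node    *node
 * )
 * {
 *   return _Scheduler_SMP_Enqueue_ordered(
 *     context,
 *     node,
 *     _Scheduler_SMP_Insert_priority_fifo_order,
 *     _Scheduler_example_SMP_Insert_ready_fifo,
 *     _Scheduler_SMP_Insert_scheduled_fifo,
 *     _Scheduler_example_SMP_Move_from_scheduled_to_ready,
 *     _Scheduler_SMP_Get_lowest_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy
 *   );
 * }
 * @endcode
 */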
724
725/**
726 * @brief Enqueues a scheduled node according to the specified order
727 * function.
728 *
729 * @param[in] context The scheduler instance context.
730 * @param[in] node The node to enqueue.
731 * @param[in] order The order function.
732 * @param[in] extract_from_ready Function to extract a node from the set of
733 *   ready nodes.
734 * @param[in] get_highest_ready Function to get the highest ready node.
735 * @param[in] insert_ready Function to insert a node into the set of ready
736 *   nodes.
737 * @param[in] insert_scheduled Function to insert a node into the set of
738 *   scheduled nodes.
739 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
740 *   of ready nodes to the set of scheduled nodes.
741 * @param[in] allocate_processor Function to allocate a processor to a node
742 *   based on the rules of the scheduler.
743 */
744static inline bool _Scheduler_SMP_Enqueue_scheduled_ordered(
745  Scheduler_Context                *context,
746  Scheduler_Node                   *node,
747  Chain_Node_order                  order,
748  Scheduler_SMP_Extract             extract_from_ready,
749  Scheduler_SMP_Get_highest_ready   get_highest_ready,
750  Scheduler_SMP_Insert              insert_ready,
751  Scheduler_SMP_Insert              insert_scheduled,
752  Scheduler_SMP_Move                move_from_ready_to_scheduled,
753  Scheduler_SMP_Allocate_processor  allocate_processor
754)
755{
756  while ( true ) {
757    Scheduler_Node                   *highest_ready;
758    Scheduler_Try_to_schedule_action  action;
759
760    highest_ready = ( *get_highest_ready )( context, node );
761
762    /*
763     * The node has been extracted from the scheduled chain.  We have to place
764     * it now on the scheduled or ready set.
765     */
766    if (
767      node->sticky_level > 0
768        && ( *order )( &node->Node.Chain, &highest_ready->Node.Chain )
769    ) {
770      ( *insert_scheduled )( context, node );
771
772      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
773        Thread_Control   *owner;
774        ISR_lock_Context  lock_context;
775
776        owner = _Scheduler_Node_get_owner( node );
777        _Thread_Scheduler_acquire_critical( owner, &lock_context );
778
779        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
780          _Thread_Scheduler_cancel_need_for_help(
781            owner,
782            _Thread_Get_CPU( owner )
783          );
784          _Scheduler_Discard_idle_thread(
785            context,
786            owner,
787            node,
788            _Scheduler_SMP_Release_idle_thread
789          );
790          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
791        }
792
793        _Thread_Scheduler_release_critical( owner, &lock_context );
794      }
795
796      return false;
797    }
798
799    action = _Scheduler_Try_to_schedule_node(
800      context,
801      highest_ready,
802      _Scheduler_Node_get_idle( node ),
803      _Scheduler_SMP_Get_idle_thread
804    );
805
806    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
807      Thread_Control *idle;
808
809      _Scheduler_SMP_Preempt(
810        context,
811        highest_ready,
812        node,
813        allocate_processor
814      );
815
816      ( *insert_ready )( context, node );
817      ( *move_from_ready_to_scheduled )( context, highest_ready );
818
819      idle = _Scheduler_Release_idle_thread(
820        context,
821        node,
822        _Scheduler_SMP_Release_idle_thread
823      );
824      return ( idle == NULL );
825    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
826      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
827      _Scheduler_SMP_Node_change_state(
828        highest_ready,
829        SCHEDULER_SMP_NODE_SCHEDULED
830      );
831
832      ( *insert_ready )( context, node );
833      ( *move_from_ready_to_scheduled )( context, highest_ready );
834
835      _Scheduler_Exchange_idle_thread(
836        highest_ready,
837        node,
838        _Scheduler_Node_get_idle( node )
839      );
840      return false;
841    } else {
842      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
843
844      _Scheduler_SMP_Node_change_state(
845        highest_ready,
846        SCHEDULER_SMP_NODE_BLOCKED
847      );
848
849      ( *extract_from_ready )( context, highest_ready );
850    }
851  }
852}
853
854static inline void _Scheduler_SMP_Extract_from_scheduled(
855  Scheduler_Node *node
856)
857{
858  _Chain_Extract_unprotected( &node->Node.Chain );
859}
860
861static inline void _Scheduler_SMP_Schedule_highest_ready(
862  Scheduler_Context                *context,
863  Scheduler_Node                   *victim,
864  Per_CPU_Control                  *victim_cpu,
865  Scheduler_SMP_Extract             extract_from_ready,
866  Scheduler_SMP_Get_highest_ready   get_highest_ready,
867  Scheduler_SMP_Move                move_from_ready_to_scheduled,
868  Scheduler_SMP_Allocate_processor  allocate_processor
869)
870{
871  Scheduler_Try_to_schedule_action action;
872
873  do {
874    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
875
876    action = _Scheduler_Try_to_schedule_node(
877      context,
878      highest_ready,
879      NULL,
880      _Scheduler_SMP_Get_idle_thread
881    );
882
883    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
884      _Scheduler_SMP_Allocate_processor(
885        context,
886        highest_ready,
887        victim,
888        victim_cpu,
889        allocate_processor
890      );
891
892      ( *move_from_ready_to_scheduled )( context, highest_ready );
893    } else {
894      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
895
896      _Scheduler_SMP_Node_change_state(
897        highest_ready,
898        SCHEDULER_SMP_NODE_BLOCKED
899      );
900
901      ( *extract_from_ready )( context, highest_ready );
902    }
903  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
904}
905
906/**
907 * @brief Blocks a thread.
908 *
909 * @param[in] context The scheduler instance context.
910 * @param[in] thread The thread of the scheduling operation.
911 * @param[in] node The scheduler node of the thread to block.
912 * @param[in] extract_from_ready Function to extract a node from the set of
913 *   ready nodes.
914 * @param[in] get_highest_ready Function to get the highest ready node.
915 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
916 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
917 */
918static inline void _Scheduler_SMP_Block(
919  Scheduler_Context                *context,
920  Thread_Control                   *thread,
921  Scheduler_Node                   *node,
922  Scheduler_SMP_Extract             extract_from_ready,
923  Scheduler_SMP_Get_highest_ready   get_highest_ready,
924  Scheduler_SMP_Move                move_from_ready_to_scheduled,
925  Scheduler_SMP_Allocate_processor  allocate_processor
926)
927{
928  Scheduler_SMP_Node_state  node_state;
929  Per_CPU_Control          *thread_cpu;
930
931  node_state = _Scheduler_SMP_Node_state( node );
932
933  thread_cpu = _Scheduler_Block_node(
934    context,
935    thread,
936    node,
937    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
938    _Scheduler_SMP_Get_idle_thread
939  );
940
941  if ( thread_cpu != NULL ) {
942    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
943
944    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
945      _Scheduler_SMP_Extract_from_scheduled( node );
946      _Scheduler_SMP_Schedule_highest_ready(
947        context,
948        node,
949        thread_cpu,
950        extract_from_ready,
951        get_highest_ready,
952        move_from_ready_to_scheduled,
953        allocate_processor
954      );
955    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
956      ( *extract_from_ready )( context, node );
957    }
958  }
959}
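
/*
 * A backend usually exposes _Scheduler_SMP_Block() as its block operation and
 * simply passes its ready-queue helpers.  Hypothetical sketch, assuming
 * _Scheduler_Get_context() from <rtems/score/schedulerimpl.h>; the
 * _Scheduler_example_SMP_* helpers are assumptions for illustration:
 *
 * @code
 * void _Scheduler_example_SMP_Block(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *thread,
 *   Scheduler_Node          *node
 * )
 * {
 *   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *   _Scheduler_SMP_Block(
 *     context,
 *     thread,
 *     node,
 *     _Scheduler_example_SMP_Extract_from_ready,
 *     _Scheduler_example_SMP_Get_highest_ready,
 *     _Scheduler_example_SMP_Move_from_ready_to_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy
 *   );
 * }
 * @endcode
 */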
960
961static inline void _Scheduler_SMP_Unblock(
962  Scheduler_Context     *context,
963  Thread_Control        *thread,
964  Scheduler_Node        *node,
965  Scheduler_SMP_Update   update,
966  Scheduler_SMP_Enqueue  enqueue_fifo
967)
968{
969  Scheduler_SMP_Node_state  node_state;
970  bool                      unblock;
971
972  node_state = _Scheduler_SMP_Node_state( node );
973  unblock = _Scheduler_Unblock_node(
974    context,
975    thread,
976    node,
977    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
978    _Scheduler_SMP_Release_idle_thread
979  );
980
981  if ( unblock ) {
982    Priority_Control new_priority;
983    bool             prepend_it;
984    bool             needs_help;
985
986    new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
987    (void) prepend_it;
988
989    if ( new_priority != _Scheduler_SMP_Node_priority( node ) ) {
990      ( *update )( context, node, new_priority );
991    }
992
993    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
994      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
995
996      needs_help = ( *enqueue_fifo )( context, node );
997    } else {
998      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
999      _Assert( node->sticky_level > 0 );
1000      _Assert( node->idle == NULL );
1001      needs_help = true;
1002    }
1003
1004    if ( needs_help ) {
1005      _Scheduler_Ask_for_help( thread );
1006    }
1007  }
1008}
1009
1010static inline void _Scheduler_SMP_Update_priority(
1011  Scheduler_Context          *context,
1012  Thread_Control             *thread,
1013  Scheduler_Node             *node,
1014  Scheduler_SMP_Extract       extract_from_ready,
1015  Scheduler_SMP_Update        update,
1016  Scheduler_SMP_Enqueue       enqueue_fifo,
1017  Scheduler_SMP_Enqueue       enqueue_lifo,
1018  Scheduler_SMP_Enqueue       enqueue_scheduled_fifo,
1019  Scheduler_SMP_Enqueue       enqueue_scheduled_lifo,
1020  Scheduler_SMP_Ask_for_help  ask_for_help
1021)
1022{
1023  Priority_Control         new_priority;
1024  bool                     prepend_it;
1025  Scheduler_SMP_Node_state node_state;
1026
1027  new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
1028
1029  if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
1030    if ( _Thread_Is_ready( thread ) ) {
1031      ( *ask_for_help )( context, thread, node );
1032    }
1033
1034    return;
1035  }
1036
1037  node_state = _Scheduler_SMP_Node_state( node );
1038
1039  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1040    _Scheduler_SMP_Extract_from_scheduled( node );
1041
1042    ( *update )( context, node, new_priority );
1043
1044    if ( prepend_it ) {
1045      ( *enqueue_scheduled_lifo )( context, node );
1046    } else {
1047      ( *enqueue_scheduled_fifo )( context, node );
1048    }
1049  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1050    ( *extract_from_ready )( context, node );
1051
1052    ( *update )( context, node, new_priority );
1053
1054    if ( prepend_it ) {
1055      ( *enqueue_lifo )( context, node );
1056    } else {
1057      ( *enqueue_fifo )( context, node );
1058    }
1059  } else {
1060    ( *update )( context, node, new_priority );
1061
1062    if ( _Thread_Is_ready( thread ) ) {
1063      ( *ask_for_help )( context, thread, node );
1064    }
1065  }
1066}
1067
1068static inline void _Scheduler_SMP_Yield(
1069  Scheduler_Context     *context,
1070  Thread_Control        *thread,
1071  Scheduler_Node        *node,
1072  Scheduler_SMP_Extract  extract_from_ready,
1073  Scheduler_SMP_Enqueue  enqueue_fifo,
1074  Scheduler_SMP_Enqueue  enqueue_scheduled_fifo
1075)
1076{
1077  bool                     needs_help;
1078  Scheduler_SMP_Node_state node_state;
1079
1080  node_state = _Scheduler_SMP_Node_state( node );
1081
1082  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1083    _Scheduler_SMP_Extract_from_scheduled( node );
1084    ( *enqueue_scheduled_fifo )( context, node );
1085    needs_help = false;
1086  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1087    ( *extract_from_ready )( context, node );
1088
1089    needs_help = ( *enqueue_fifo )( context, node );
1090  } else {
1091    needs_help = true;
1092  }
1093
1094  if ( needs_help ) {
1095    _Scheduler_Ask_for_help( thread );
1096  }
1097}
1098
1099static inline void _Scheduler_SMP_Insert_scheduled_lifo(
1100  Scheduler_Context *context,
1101  Scheduler_Node    *node_to_insert
1102)
1103{
1104  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1105
1106  _Chain_Insert_ordered_unprotected(
1107    &self->Scheduled,
1108    &node_to_insert->Node.Chain,
1109    _Scheduler_SMP_Insert_priority_lifo_order
1110  );
1111}
1112
1113static inline void _Scheduler_SMP_Insert_scheduled_fifo(
1114  Scheduler_Context *context,
1115  Scheduler_Node    *node_to_insert
1116)
1117{
1118  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1119
1120  _Chain_Insert_ordered_unprotected(
1121    &self->Scheduled,
1122    &node_to_insert->Node.Chain,
1123    _Scheduler_SMP_Insert_priority_fifo_order
1124  );
1125}
1126
1127static inline bool _Scheduler_SMP_Ask_for_help(
1128  Scheduler_Context                  *context,
1129  Thread_Control                     *thread,
1130  Scheduler_Node                     *node,
1131  Chain_Node_order                    order,
1132  Scheduler_SMP_Insert                insert_ready,
1133  Scheduler_SMP_Insert                insert_scheduled,
1134  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
1135  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
1136  Scheduler_SMP_Allocate_processor    allocate_processor
1137)
1138{
1139  Scheduler_Node   *lowest_scheduled;
1140  ISR_lock_Context  lock_context;
1141  bool              success;
1142
1143  lowest_scheduled = ( *get_lowest_scheduled )( context, node, order );
1144
1145  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1146
1147  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
1148    Scheduler_SMP_Node_state node_state;
1149
1150    node_state = _Scheduler_SMP_Node_state( node );
1151
1152    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1153      if ( ( *order )( &node->Node.Chain, &lowest_scheduled->Node.Chain ) ) {
1154        _Thread_Scheduler_cancel_need_for_help(
1155          thread,
1156          _Thread_Get_CPU( thread )
1157        );
1158        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1159        _Thread_Scheduler_release_critical( thread, &lock_context );
1160
1161        _Scheduler_SMP_Preempt(
1162          context,
1163          node,
1164          lowest_scheduled,
1165          allocate_processor
1166        );
1167
1168        ( *insert_scheduled )( context, node );
1169        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
1170
1171        _Scheduler_Release_idle_thread(
1172          context,
1173          lowest_scheduled,
1174          _Scheduler_SMP_Release_idle_thread
1175        );
1176        success = true;
1177      } else {
1178        _Thread_Scheduler_release_critical( thread, &lock_context );
1179        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
1180        ( *insert_ready )( context, node );
1181        success = false;
1182      }
1183    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1184      _Thread_Scheduler_cancel_need_for_help(
1185        thread,
1186        _Thread_Get_CPU( thread )
1187      );
1188      _Scheduler_Discard_idle_thread(
1189        context,
1190        thread,
1191        node,
1192        _Scheduler_SMP_Release_idle_thread
1193      );
1194      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1195      _Thread_Scheduler_release_critical( thread, &lock_context );
1196      success = true;
1197    } else {
1198      _Thread_Scheduler_release_critical( thread, &lock_context );
1199      success = false;
1200    }
1201  } else {
1202    _Thread_Scheduler_release_critical( thread, &lock_context );
1203    success = false;
1204  }
1205
1206  return success;
1207}
1208
1209static inline void _Scheduler_SMP_Reconsider_help_request(
1210  Scheduler_Context     *context,
1211  Thread_Control        *thread,
1212  Scheduler_Node        *node,
1213  Scheduler_SMP_Extract  extract_from_ready
1214)
1215{
1216  ISR_lock_Context lock_context;
1217
1218  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1219
1220  if (
1221    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
1222      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
1223      && node->sticky_level == 1
1224  ) {
1225    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1226    ( *extract_from_ready )( context, node );
1227  }
1228
1229  _Thread_Scheduler_release_critical( thread, &lock_context );
1230}
1231
1232static inline void _Scheduler_SMP_Withdraw_node(
1233  Scheduler_Context                *context,
1234  Thread_Control                   *thread,
1235  Scheduler_Node                   *node,
1236  Thread_Scheduler_state            next_state,
1237  Scheduler_SMP_Extract             extract_from_ready,
1238  Scheduler_SMP_Get_highest_ready   get_highest_ready,
1239  Scheduler_SMP_Move                move_from_ready_to_scheduled,
1240  Scheduler_SMP_Allocate_processor  allocate_processor
1241)
1242{
1243  ISR_lock_Context         lock_context;
1244  Scheduler_SMP_Node_state node_state;
1245
1246  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1247
1248  node_state = _Scheduler_SMP_Node_state( node );
1249  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1250
1251  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1252    Per_CPU_Control *thread_cpu;
1253
1254    thread_cpu = _Thread_Get_CPU( thread );
1255    _Scheduler_Thread_change_state( thread, next_state );
1256    _Thread_Scheduler_release_critical( thread, &lock_context );
1257
1258    _Scheduler_SMP_Extract_from_scheduled( node );
1259    _Scheduler_SMP_Schedule_highest_ready(
1260      context,
1261      node,
1262      thread_cpu,
1263      extract_from_ready,
1264      get_highest_ready,
1265      move_from_ready_to_scheduled,
1266      allocate_processor
1267    );
1268  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1269    _Thread_Scheduler_release_critical( thread, &lock_context );
1270    ( *extract_from_ready )( context, node );
1271  } else {
1272    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
1273    _Thread_Scheduler_release_critical( thread, &lock_context );
1274  }
1275}
1276
1277static inline void _Scheduler_SMP_Add_processor(
1278  Scheduler_Context       *context,
1279  Thread_Control          *idle,
1280  Scheduler_SMP_Has_ready  has_ready,
1281  Scheduler_SMP_Enqueue    enqueue_scheduled_fifo
1282)
1283{
1284  Scheduler_SMP_Context *self;
1285  Scheduler_Node        *node;
1286
1287  self = _Scheduler_SMP_Get_self( context );
1288  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1289  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
1290  node = _Thread_Scheduler_get_home_node( idle );
1291  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
1292
1293  if ( ( *has_ready )( &self->Base ) ) {
1294    ( *enqueue_scheduled_fifo )( &self->Base, node );
1295  } else {
1296    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
1297  }
1298}
1299
1300static inline Thread_Control *_Scheduler_SMP_Remove_processor(
1301  Scheduler_Context     *context,
1302  Per_CPU_Control       *cpu,
1303  Scheduler_SMP_Extract  extract_from_ready,
1304  Scheduler_SMP_Enqueue  enqueue_fifo
1305)
1306{
1307  Scheduler_SMP_Context *self;
1308  Chain_Node            *chain_node;
1309  Scheduler_Node        *victim_node;
1310  Thread_Control        *victim_user;
1311  Thread_Control        *victim_owner;
1312  Thread_Control        *idle;
1313
1314  self = _Scheduler_SMP_Get_self( context );
1315  chain_node = _Chain_First( &self->Scheduled );
1316
1317  do {
1318    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
1319    victim_node = (Scheduler_Node *) chain_node;
1320    victim_user = _Scheduler_Node_get_user( victim_node );
1321    chain_node = _Chain_Next( chain_node );
1322  } while ( _Thread_Get_CPU( victim_user ) != cpu );
1323
1324  _Scheduler_SMP_Extract_from_scheduled( victim_node );
1325  victim_owner = _Scheduler_Node_get_owner( victim_node );
1326
1327  if ( !victim_owner->is_idle ) {
1328    Scheduler_Node *idle_node;
1329
1330    _Scheduler_Release_idle_thread(
1331      &self->Base,
1332      victim_node,
1333      _Scheduler_SMP_Release_idle_thread
1334    );
1335    idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
1336    idle_node = _Thread_Scheduler_get_home_node( idle );
1337    ( *extract_from_ready )( &self->Base, idle_node );
1338    _Scheduler_SMP_Preempt(
1339      &self->Base,
1340      idle_node,
1341      victim_node,
1342      _Scheduler_SMP_Allocate_processor_exact
1343    );
1344
1345    if ( !_Chain_Is_empty( &self->Scheduled ) ) {
1346      ( *enqueue_fifo )( context, victim_node );
1347    }
1348  } else {
1349    _Assert( victim_owner == victim_user );
1350    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
1351    idle = victim_owner;
1352    _Scheduler_SMP_Extract_idle_thread( idle );
1353  }
1354
1355  return idle;
1356}
1357
1358/** @} */
1359
1360#ifdef __cplusplus
1361}
1362#endif /* __cplusplus */
1363
1364#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */