source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ 6771359f

Last change on this file was 6771359f, checked in by Sebastian Huber <sebastian.huber@…> on Oct 27, 2016 at 4:42:06 AM

score: Second part of new MrsP implementation

Update #2556.

  • Property mode set to 100644
File size: 35.1 KB
1/**
2 * @file
3 *
4 * @brief SMP Scheduler Implementation
5 *
6 * @ingroup ScoreSchedulerSMP
7 */
8
9/*
10 * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
11 *
12 *  embedded brains GmbH
13 *  Dornierstr. 4
14 *  82178 Puchheim
15 *  Germany
16 *  <rtems@embedded-brains.de>
17 *
18 * The license and distribution terms for this file may be
19 * found in the file LICENSE in this distribution or at
20 * http://www.rtems.org/license/LICENSE.
21 */
22
23#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25
26#include <rtems/score/schedulersmp.h>
27#include <rtems/score/assert.h>
28#include <rtems/score/chainimpl.h>
29#include <rtems/score/schedulersimpleimpl.h>
30#include <rtems/bspIo.h>
31
32#ifdef __cplusplus
33extern "C" {
34#endif /* __cplusplus */
35
36/**
37 * @addtogroup ScoreSchedulerSMP
38 *
39 * The scheduler nodes can be in three states
40 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
41 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
42 * - @ref SCHEDULER_SMP_NODE_READY.
43 *
44 * State transitions are triggered via basic operations
45 * - _Scheduler_SMP_Enqueue_ordered(),
46 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
47 * - _Scheduler_SMP_Block().
48 *
49 * @dot
50 * digraph {
51 *   node [style="filled"];
52 *
53 *   bs [label="BLOCKED"];
54 *   ss [label="SCHEDULED", fillcolor="green"];
55 *   rs [label="READY", fillcolor="red"];
56 *
57 *   edge [label="enqueue"];
58 *   edge [fontcolor="darkgreen", color="darkgreen"];
59 *
60 *   bs -> ss;
61 *
62 *   edge [fontcolor="red", color="red"];
63 *
64 *   bs -> rs;
65 *
66 *   edge [label="enqueue other"];
67 *
68 *   ss -> rs;
69 *
70 *   edge [label="block"];
71 *   edge [fontcolor="black", color="black"];
72 *
73 *   ss -> bs;
74 *   rs -> bs;
75 *
76 *   edge [label="block other"];
77 *   edge [fontcolor="darkgreen", color="darkgreen"];
78 *
79 *   rs -> ss;
80 * }
81 * @enddot
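 *
 * As an informal illustration (a sketch, not a verbatim code path), the
 * BLOCKED to READY transition performed by an unblock operation looks roughly
 * like the fragment below, where enqueue_fifo is a callback supplied by the
 * concrete scheduler back end:
 * @code
 * if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
 *   _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
 *   needs_help = ( *enqueue_fifo )( context, node, thread );
 * }
 * @endcode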
82 *
83 * During system initialization each processor of the scheduler instance starts
84 * with an idle thread assigned to it.  Let's have a look at an example with two
85 * idle threads I and J with priority 5.  We also have blocked threads A, B and
86 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
87 * with respect to the thread priority from left to right in the diagrams
88 * below.  The highest priority node (lowest priority number) is the leftmost
89 * node.  Since the processor assignment is independent of the thread priority,
90 * the assigned processor may change as a node moves from one state to another.
91 *
92 * @dot
93 * digraph {
94 *   node [style="filled"];
95 *   edge [dir="none"];
96 *   subgraph {
97 *     rank = same;
98 *
99 *     i [label="I (5)", fillcolor="green"];
100 *     j [label="J (5)", fillcolor="green"];
101 *     a [label="A (1)"];
102 *     b [label="B (2)"];
103 *     c [label="C (3)"];
104 *     i -> j;
105 *   }
106 *
107 *   subgraph {
108 *     rank = same;
109 *
110 *     p0 [label="PROCESSOR 0", shape="box"];
111 *     p1 [label="PROCESSOR 1", shape="box"];
112 *   }
113 *
114 *   i -> p0;
115 *   j -> p1;
116 * }
117 * @enddot
118 *
119 * Let's start A.  For this, an enqueue operation is performed.
120 *
121 * @dot
122 * digraph {
123 *   node [style="filled"];
124 *   edge [dir="none"];
125 *
126 *   subgraph {
127 *     rank = same;
128 *
129 *     i [label="I (5)", fillcolor="green"];
130 *     j [label="J (5)", fillcolor="red"];
131 *     a [label="A (1)", fillcolor="green"];
132 *     b [label="B (2)"];
133 *     c [label="C (3)"];
134 *     a -> i;
135 *   }
136 *
137 *   subgraph {
138 *     rank = same;
139 *
140 *     p0 [label="PROCESSOR 0", shape="box"];
141 *     p1 [label="PROCESSOR 1", shape="box"];
142 *   }
143 *
144 *   i -> p0;
145 *   a -> p1;
146 * }
147 * @enddot
148 *
149 * Let's start C.
150 *
151 * @dot
152 * digraph {
153 *   node [style="filled"];
154 *   edge [dir="none"];
155 *
156 *   subgraph {
157 *     rank = same;
158 *
159 *     a [label="A (1)", fillcolor="green"];
160 *     c [label="C (3)", fillcolor="green"];
161 *     i [label="I (5)", fillcolor="red"];
162 *     j [label="J (5)", fillcolor="red"];
163 *     b [label="B (2)"];
164 *     a -> c;
165 *     i -> j;
166 *   }
167 *
168 *   subgraph {
169 *     rank = same;
170 *
171 *     p0 [label="PROCESSOR 0", shape="box"];
172 *     p1 [label="PROCESSOR 1", shape="box"];
173 *   }
174 *
175 *   c -> p0;
176 *   a -> p1;
177 * }
178 * @enddot
179 *
180 * Let's start B.
181 *
182 * @dot
183 * digraph {
184 *   node [style="filled"];
185 *   edge [dir="none"];
186 *
187 *   subgraph {
188 *     rank = same;
189 *
190 *     a [label="A (1)", fillcolor="green"];
191 *     b [label="B (2)", fillcolor="green"];
192 *     c [label="C (3)", fillcolor="red"];
193 *     i [label="I (5)", fillcolor="red"];
194 *     j [label="J (5)", fillcolor="red"];
195 *     a -> b;
196 *     c -> i -> j;
197 *   }
198 *
199 *   subgraph {
200 *     rank = same;
201 *
202 *     p0 [label="PROCESSOR 0", shape="box"];
203 *     p1 [label="PROCESSOR 1", shape="box"];
204 *   }
205 *
206 *   b -> p0;
207 *   a -> p1;
208 * }
209 * @enddot
210 *
211 * Let's change the priority of thread A to 4.
212 *
213 * @dot
214 * digraph {
215 *   node [style="filled"];
216 *   edge [dir="none"];
217 *
218 *   subgraph {
219 *     rank = same;
220 *
221 *     b [label="B (2)", fillcolor="green"];
222 *     c [label="C (3)", fillcolor="green"];
223 *     a [label="A (4)", fillcolor="red"];
224 *     i [label="I (5)", fillcolor="red"];
225 *     j [label="J (5)", fillcolor="red"];
226 *     b -> c;
227 *     a -> i -> j;
228 *   }
229 *
230 *   subgraph {
231 *     rank = same;
232 *
233 *     p0 [label="PROCESSOR 0", shape="box"];
234 *     p1 [label="PROCESSOR 1", shape="box"];
235 *   }
236 *
237 *   b -> p0;
238 *   c -> p1;
239 * }
240 * @enddot
241 *
242 * Now perform a blocking operation with thread B.  Please note that thread A
243 * now migrated from processor 1 to processor 0 and thread C still executes on
244 * processor 1.
245 *
246 * @dot
247 * digraph {
248 *   node [style="filled"];
249 *   edge [dir="none"];
250 *
251 *   subgraph {
252 *     rank = same;
253 *
254 *     c [label="C (3)", fillcolor="green"];
255 *     a [label="A (4)", fillcolor="green"];
256 *     i [label="I (5)", fillcolor="red"];
257 *     j [label="J (5)", fillcolor="red"];
258 *     b [label="B (2)"];
259 *     c -> a;
260 *     i -> j;
261 *   }
262 *
263 *   subgraph {
264 *     rank = same;
265 *
266 *     p0 [label="PROCESSOR 0", shape="box"];
267 *     p1 [label="PROCESSOR 1", shape="box"];
268 *   }
269 *
270 *   a -> p0;
271 *   c -> p1;
272 * }
273 * @enddot
274 *
275 * @{
276 */
277
278typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
279  Scheduler_Context *context,
280  Scheduler_Node    *node
281);
282
283typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
284  Scheduler_Context *context,
285  Scheduler_Node    *filter,
286  Chain_Node_order   order
287);
288
289typedef void ( *Scheduler_SMP_Extract )(
290  Scheduler_Context *context,
291  Scheduler_Node    *node_to_extract
292);
293
294typedef void ( *Scheduler_SMP_Insert )(
295  Scheduler_Context *context,
296  Scheduler_Node    *node_to_insert
297);
298
299typedef void ( *Scheduler_SMP_Move )(
300  Scheduler_Context *context,
301  Scheduler_Node    *node_to_move
302);
303
304typedef bool ( *Scheduler_SMP_Ask_for_help )(
305  Scheduler_Context *context,
306  Thread_Control    *thread,
307  Scheduler_Node    *node
308);
309
310typedef void ( *Scheduler_SMP_Update )(
311  Scheduler_Context *context,
312  Scheduler_Node    *node_to_update,
313  Priority_Control   new_priority
314);
315
316typedef Thread_Control *( *Scheduler_SMP_Enqueue )(
317  Scheduler_Context *context,
318  Scheduler_Node    *node_to_enqueue,
319  Thread_Control    *needs_help
320);
321
322typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
323  Scheduler_Context *context,
324  Scheduler_Node    *node_to_enqueue
325);
326
327typedef void ( *Scheduler_SMP_Allocate_processor )(
328  Scheduler_Context *context,
329  Thread_Control    *scheduled_thread,
330  Thread_Control    *victim_thread,
331  Per_CPU_Control   *victim_cpu
332);
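
/*
 * Illustrative sketch, not part of this header: a concrete SMP scheduler back
 * end provides functions matching the typedefs above and passes them to the
 * template operations below.  For example, an insert operation for a
 * hypothetical ready chain (the My_scheduler_Context type and its Ready member
 * are assumptions of this sketch) could mirror
 * _Scheduler_SMP_Insert_scheduled_fifo():
 *
 *   static void _My_scheduler_Insert_ready_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node_to_insert
 *   )
 *   {
 *     My_scheduler_Context *self = (My_scheduler_Context *) context;
 *
 *     _Chain_Insert_ordered_unprotected(
 *       &self->Ready,
 *       &node_to_insert->Node,
 *       _Scheduler_SMP_Insert_priority_fifo_order
 *     );
 *   }
 */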
333
334static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
335  const Chain_Node *to_insert,
336  const Chain_Node *next
337)
338{
339  const Scheduler_SMP_Node *node_to_insert =
340    (const Scheduler_SMP_Node *) to_insert;
341  const Scheduler_SMP_Node *node_next =
342    (const Scheduler_SMP_Node *) next;
343
344  return node_to_insert->priority <= node_next->priority;
345}
346
347static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
348  const Chain_Node *to_insert,
349  const Chain_Node *next
350)
351{
352  const Scheduler_SMP_Node *node_to_insert =
353    (const Scheduler_SMP_Node *) to_insert;
354  const Scheduler_SMP_Node *node_next =
355    (const Scheduler_SMP_Node *) next;
356
357  return node_to_insert->priority < node_next->priority;
358}
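
/*
 * Worked example (informal): suppose the ready or scheduled chain already
 * contains a node of priority 2.  For a new node of priority 2, the LIFO
 * order above yields true (2 <= 2), so an ordered insert places the new node
 * before the existing one, while the FIFO order yields false (2 < 2), so the
 * new node ends up after all nodes of equal priority.
 */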
359
360static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
361  Scheduler_Context *context
362)
363{
364  return (Scheduler_SMP_Context *) context;
365}
366
367static inline void _Scheduler_SMP_Initialize(
368  Scheduler_SMP_Context *self
369)
370{
371  _Chain_Initialize_empty( &self->Scheduled );
372  _Chain_Initialize_empty( &self->Idle_threads );
373}
374
375static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
376  Thread_Control *thread
377)
378{
379  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
380}
381
382static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
383  Thread_Control *thread
384)
385{
386  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_own_node( thread );
387}
388
389static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
390  Scheduler_Node *node
391)
392{
393  return (Scheduler_SMP_Node *) node;
394}
395
396static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
397  const Scheduler_Node *node
398)
399{
400  return ( (const Scheduler_SMP_Node *) node )->state;
401}
402
403static inline Priority_Control _Scheduler_SMP_Node_priority(
404  const Scheduler_Node *node
405)
406{
407  return ( (const Scheduler_SMP_Node *) node )->priority;
408}
409
410static inline void _Scheduler_SMP_Node_initialize(
411  const Scheduler_Control *scheduler,
412  Scheduler_SMP_Node      *node,
413  Thread_Control          *thread,
414  Priority_Control         priority
415)
416{
417  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
418  node->state = SCHEDULER_SMP_NODE_BLOCKED;
419  node->priority = priority;
420}
421
422static inline void _Scheduler_SMP_Node_update_priority(
423  Scheduler_SMP_Node *node,
424  Priority_Control    new_priority
425)
426{
427  node->priority = new_priority;
428}
429
430extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];
431
432static inline void _Scheduler_SMP_Node_change_state(
433  Scheduler_Node           *node,
434  Scheduler_SMP_Node_state  new_state
435)
436{
437  Scheduler_SMP_Node *the_node;
438
439  the_node = _Scheduler_SMP_Node_downcast( node );
440  _Assert(
441    _Scheduler_SMP_Node_valid_state_changes[ the_node->state ][ new_state ]
442  );
443
444  the_node->state = new_state;
445}
446
447static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
448  const Scheduler_Context *context,
449  const Per_CPU_Control   *cpu
450)
451{
452  return cpu->scheduler_context == context;
453}
454
455static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
456  Scheduler_Context *context
457)
458{
459  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
460  Thread_Control *idle = (Thread_Control *)
461    _Chain_Get_first_unprotected( &self->Idle_threads );
462
463  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
464
465  return idle;
466}
467
468static inline void _Scheduler_SMP_Release_idle_thread(
469  Scheduler_Context *context,
470  Thread_Control    *idle
471)
472{
473  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
474
475  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
476}
477
478static inline void _Scheduler_SMP_Allocate_processor_lazy(
479  Scheduler_Context *context,
480  Thread_Control    *scheduled_thread,
481  Thread_Control    *victim_thread,
482  Per_CPU_Control   *victim_cpu
483)
484{
485  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
486  Per_CPU_Control *cpu_self = _Per_CPU_Get();
487  Thread_Control *heir;
488
489  _Assert( _ISR_Get_level() != 0 );
490
491  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
492    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
493      heir = scheduled_cpu->heir;
494      _Thread_Dispatch_update_heir(
495        cpu_self,
496        scheduled_cpu,
497        scheduled_thread
498      );
499    } else {
500      /* We have to force a migration to our processor set */
501      heir = scheduled_thread;
502    }
503  } else {
504    heir = scheduled_thread;
505  }
506
507  if ( heir != victim_thread ) {
508    _Thread_Set_CPU( heir, victim_cpu );
509    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
510  }
511}
512
513/*
514 * This method differs from _Scheduler_SMP_Allocate_processor_lazy() in that
515 * it does exactly what it is asked to do: it unconditionally assigns the
516 * scheduled thread to the victim processor.  The lazy variant attempts to
517 * prevent migrations but does not take processor affinity into account.
518 */
519static inline void _Scheduler_SMP_Allocate_processor_exact(
520  Scheduler_Context *context,
521  Thread_Control    *scheduled_thread,
522  Thread_Control    *victim_thread,
523  Per_CPU_Control   *victim_cpu
524)
525{
526  Per_CPU_Control *cpu_self = _Per_CPU_Get();
527
528  (void) context;
529
530  _Thread_Set_CPU( scheduled_thread, victim_cpu );
531  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
532}
533
534static inline void _Scheduler_SMP_Allocate_processor(
535  Scheduler_Context                *context,
536  Scheduler_Node                   *scheduled,
537  Thread_Control                   *victim_thread,
538  Per_CPU_Control                  *victim_cpu,
539  Scheduler_SMP_Allocate_processor  allocate_processor
540)
541{
542  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
543
544  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
545
546  ( *allocate_processor )(
547    context,
548    scheduled_thread,
549    victim_thread,
550    victim_cpu
551  );
552}
553
554static inline Thread_Control *_Scheduler_SMP_Preempt(
555  Scheduler_Context                *context,
556  Scheduler_Node                   *scheduled,
557  Scheduler_Node                   *victim,
558  Scheduler_SMP_Allocate_processor  allocate_processor
559)
560{
561  Thread_Control   *victim_thread;
562  ISR_lock_Context  lock_context;
563  Per_CPU_Control  *victim_cpu;
564
565  victim_thread = _Scheduler_Node_get_user( victim );
566  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
567
568  _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
569
570  victim_cpu = _Thread_Get_CPU( victim_thread );
571
572  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
573    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
574
575    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
576      _Per_CPU_Acquire( victim_cpu );
577      _Chain_Append_unprotected(
578        &victim_cpu->Threads_in_need_for_help,
579        &victim_thread->Scheduler.Help_node
580      );
581      _Per_CPU_Release( victim_cpu );
582    }
583  }
584
585  _Thread_Scheduler_release_critical( victim_thread, &lock_context );
586
587  _Scheduler_SMP_Allocate_processor(
588    context,
589    scheduled,
590    victim_thread,
591    victim_cpu,
592    allocate_processor
593  );
594
595  return victim_thread;
596}
597
598static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
599  Scheduler_Context *context,
600  Scheduler_Node    *filter,
601  Chain_Node_order   order
602)
603{
604  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
605  Chain_Control *scheduled = &self->Scheduled;
606  Scheduler_Node *lowest_scheduled =
607    (Scheduler_Node *) _Chain_Last( scheduled );
608
609  (void) filter;
610  (void) order;
611
612  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );
613  _Assert(
614    _Chain_Next( &lowest_scheduled->Node ) == _Chain_Tail( scheduled )
615  );
616
617  return lowest_scheduled;
618}
619
620static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
621  Scheduler_Context                *context,
622  Scheduler_Node                   *node,
623  Scheduler_Node                   *lowest_scheduled,
624  Scheduler_SMP_Insert              insert_scheduled,
625  Scheduler_SMP_Move                move_from_scheduled_to_ready,
626  Scheduler_SMP_Allocate_processor  allocate_processor
627)
628{
629  Thread_Control *needs_help;
630  Scheduler_Try_to_schedule_action action;
631
632  action = _Scheduler_Try_to_schedule_node(
633    context,
634    node,
635    _Scheduler_Node_get_idle( lowest_scheduled ),
636    _Scheduler_SMP_Get_idle_thread
637  );
638
639  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
640    Thread_Control *lowest_scheduled_user;
641    Thread_Control *idle;
642
643    lowest_scheduled_user = _Scheduler_SMP_Preempt(
644      context,
645      node,
646      lowest_scheduled,
647      allocate_processor
648    );
649
650    ( *insert_scheduled )( context, node );
651    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
652
653    idle = _Scheduler_Release_idle_thread(
654      context,
655      lowest_scheduled,
656      _Scheduler_SMP_Release_idle_thread
657    );
658    if ( idle == NULL ) {
659      needs_help = lowest_scheduled_user;
660    } else {
661      needs_help = NULL;
662    }
663  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
664    _Scheduler_SMP_Node_change_state(
665      lowest_scheduled,
666      SCHEDULER_SMP_NODE_READY
667    );
668    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
669
670    ( *insert_scheduled )( context, node );
671    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
672
673    _Scheduler_Exchange_idle_thread(
674      node,
675      lowest_scheduled,
676      _Scheduler_Node_get_idle( lowest_scheduled )
677    );
678
679    needs_help = NULL;
680  } else {
681    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
682    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
683    needs_help = NULL;
684  }
685
686  return needs_help;
687}
688
689/**
690 * @brief Enqueues a node according to the specified order function.
691 *
692 * The node must not be in the scheduled state.
693 *
694 * @param[in] context The scheduler instance context.
695 * @param[in] node The node to enqueue.
696 * @param[in] needs_help The thread needing help in case the node cannot be
697 *   scheduled.
698 * @param[in] order The order function.
699 * @param[in] insert_ready Function to insert a node into the set of ready
700 *   nodes.
701 * @param[in] insert_scheduled Function to insert a node into the set of
702 *   scheduled nodes.
703 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
704 *   of scheduled nodes to the set of ready nodes.
705 * @param[in] get_lowest_scheduled Function to select the node from the
706 *   scheduled nodes to replace.  It may not be possible to find one; in this
707 *   case a pointer must be returned so that the order function returns false
708 *   if this pointer is passed as the second argument to the order function.
709 * @param[in] allocate_processor Function to allocate a processor to a node
710 *   based on the rules of the scheduler.
711 */
712static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
713  Scheduler_Context                  *context,
714  Scheduler_Node                     *node,
715  Thread_Control                     *needs_help,
716  Chain_Node_order                    order,
717  Scheduler_SMP_Insert                insert_ready,
718  Scheduler_SMP_Insert                insert_scheduled,
719  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
720  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
721  Scheduler_SMP_Allocate_processor    allocate_processor
722)
723{
724  Scheduler_Node *lowest_scheduled =
725    ( *get_lowest_scheduled )( context, node, order );
726
727  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
728    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
729      context,
730      node,
731      lowest_scheduled,
732      insert_scheduled,
733      move_from_scheduled_to_ready,
734      allocate_processor
735    );
736  } else {
737    ( *insert_ready )( context, node );
738  }
739
740  return needs_help;
741}
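
/*
 * Usage sketch, not part of this header: a back end typically wraps
 * _Scheduler_SMP_Enqueue_ordered() in a small function that fixes the order
 * and callback parameters.  The _My_scheduler_* names below are hypothetical;
 * the _Scheduler_SMP_* callbacks are the ones defined in this file.
 *
 *   static Thread_Control *_My_scheduler_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Thread_Control    *needs_help
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       node,
 *       needs_help,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _My_scheduler_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_scheduler_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */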
742
743/**
744 * @brief Enqueues a scheduled node according to the specified order
745 * function.
746 *
747 * @param[in] context The scheduler instance context.
748 * @param[in] node The node to enqueue.
749 * @param[in] order The order function.
750 * @param[in] extract_from_ready Function to extract a node from the set of
751 *   ready nodes.
752 * @param[in] get_highest_ready Function to get the highest ready node.
753 * @param[in] insert_ready Function to insert a node into the set of ready
754 *   nodes.
755 * @param[in] insert_scheduled Function to insert a node into the set of
756 *   scheduled nodes.
757 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
758 *   of ready nodes to the set of scheduled nodes.
759 * @param[in] allocate_processor Function to allocate a processor to a node
760 *   based on the rules of the scheduler.
761 */
762static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
763  Scheduler_Context                *context,
764  Scheduler_Node                   *node,
765  Chain_Node_order                  order,
766  Scheduler_SMP_Extract             extract_from_ready,
767  Scheduler_SMP_Get_highest_ready   get_highest_ready,
768  Scheduler_SMP_Insert              insert_ready,
769  Scheduler_SMP_Insert              insert_scheduled,
770  Scheduler_SMP_Move                move_from_ready_to_scheduled,
771  Scheduler_SMP_Allocate_processor  allocate_processor
772)
773{
774  while ( true ) {
775    Scheduler_Node                   *highest_ready;
776    Scheduler_Try_to_schedule_action  action;
777
778    highest_ready = ( *get_highest_ready )( context, node );
779
780    /*
781     * The node has been extracted from the scheduled chain.  We have to place
782     * it now on the scheduled or ready set.
783     */
784    if (
785      node->sticky_level > 0
786        && ( *order )( &node->Node, &highest_ready->Node )
787    ) {
788      ( *insert_scheduled )( context, node );
789
790      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
791        Thread_Control   *owner;
792        ISR_lock_Context  lock_context;
793
794        owner = _Scheduler_Node_get_owner( node );
795        _Thread_Scheduler_acquire_critical( owner, &lock_context );
796
797        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
798          _Thread_Scheduler_cancel_need_for_help(
799            owner,
800            _Thread_Get_CPU( owner )
801          );
802          _Scheduler_Discard_idle_thread(
803            context,
804            owner,
805            node,
806            _Scheduler_SMP_Release_idle_thread
807          );
808          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
809        }
810
811        _Thread_Scheduler_release_critical( owner, &lock_context );
812      }
813
814      return NULL;
815    }
816
817    action = _Scheduler_Try_to_schedule_node(
818      context,
819      highest_ready,
820      _Scheduler_Node_get_idle( node ),
821      _Scheduler_SMP_Get_idle_thread
822    );
823
824    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
825      Thread_Control *user;
826      Thread_Control *idle;
827
828      user = _Scheduler_SMP_Preempt(
829        context,
830        highest_ready,
831        node,
832        allocate_processor
833      );
834
835      ( *insert_ready )( context, node );
836      ( *move_from_ready_to_scheduled )( context, highest_ready );
837
838      idle = _Scheduler_Release_idle_thread(
839        context,
840        node,
841        _Scheduler_SMP_Release_idle_thread
842      );
843
844      if ( idle == NULL ) {
845        return user;
846      } else {
847        return NULL;
848      }
849    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
850      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
851      _Scheduler_SMP_Node_change_state(
852        highest_ready,
853        SCHEDULER_SMP_NODE_SCHEDULED
854      );
855
856      ( *insert_ready )( context, node );
857      ( *move_from_ready_to_scheduled )( context, highest_ready );
858
859      _Scheduler_Exchange_idle_thread(
860        highest_ready,
861        node,
862        _Scheduler_Node_get_idle( node )
863      );
864      return NULL;
865    } else {
866      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
867
868      _Scheduler_SMP_Node_change_state(
869        highest_ready,
870        SCHEDULER_SMP_NODE_BLOCKED
871      );
872
873      ( *extract_from_ready )( context, highest_ready );
874    }
875  }
876}
877
878static inline void _Scheduler_SMP_Extract_from_scheduled(
879  Scheduler_Node *node
880)
881{
882  _Chain_Extract_unprotected( &node->Node );
883}
884
885static inline void _Scheduler_SMP_Schedule_highest_ready(
886  Scheduler_Context                *context,
887  Scheduler_Node                   *victim,
888  Per_CPU_Control                  *victim_cpu,
889  Scheduler_SMP_Extract             extract_from_ready,
890  Scheduler_SMP_Get_highest_ready   get_highest_ready,
891  Scheduler_SMP_Move                move_from_ready_to_scheduled,
892  Scheduler_SMP_Allocate_processor  allocate_processor
893)
894{
895  Scheduler_Try_to_schedule_action action;
896
897  do {
898    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
899
900    action = _Scheduler_Try_to_schedule_node(
901      context,
902      highest_ready,
903      NULL,
904      _Scheduler_SMP_Get_idle_thread
905    );
906
907    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
908      _Scheduler_SMP_Allocate_processor(
909        context,
910        highest_ready,
911        _Scheduler_Node_get_user( victim ),
912        victim_cpu,
913        allocate_processor
914      );
915
916      ( *move_from_ready_to_scheduled )( context, highest_ready );
917    } else {
918      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
919
920      _Scheduler_SMP_Node_change_state(
921        highest_ready,
922        SCHEDULER_SMP_NODE_BLOCKED
923      );
924
925      ( *extract_from_ready )( context, highest_ready );
926    }
927  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
928}
929
930/**
931 * @brief Blocks a thread.
932 *
933 * @param[in] context The scheduler instance context.
934 * @param[in] thread The thread of the scheduling operation.
935 * @param[in] node The scheduler node of the thread to block.
936 * @param[in] extract_from_ready Function to extract a node from the set of
937 *   ready nodes.
938 * @param[in] get_highest_ready Function to get the highest ready node.
939 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
940 *   of ready nodes to the set of scheduled nodes.
941 */
942static inline void _Scheduler_SMP_Block(
943  Scheduler_Context                *context,
944  Thread_Control                   *thread,
945  Scheduler_Node                   *node,
946  Scheduler_SMP_Extract             extract_from_ready,
947  Scheduler_SMP_Get_highest_ready   get_highest_ready,
948  Scheduler_SMP_Move                move_from_ready_to_scheduled,
949  Scheduler_SMP_Allocate_processor  allocate_processor
950)
951{
952  Scheduler_SMP_Node_state  node_state;
953  Per_CPU_Control          *thread_cpu;
954
955  node_state = _Scheduler_SMP_Node_state( node );
956
957  thread_cpu = _Scheduler_Block_node(
958    context,
959    thread,
960    node,
961    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
962    _Scheduler_SMP_Get_idle_thread
963  );
964
965  if ( thread_cpu != NULL ) {
966    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
967
968    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
969      _Scheduler_SMP_Extract_from_scheduled( node );
970      _Scheduler_SMP_Schedule_highest_ready(
971        context,
972        node,
973        thread_cpu,
974        extract_from_ready,
975        get_highest_ready,
976        move_from_ready_to_scheduled,
977        allocate_processor
978      );
979    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
980      ( *extract_from_ready )( context, node );
981    }
982  }
983}
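
/*
 * Usage sketch, not part of this header: the block operation of a concrete
 * scheduler would typically forward to this template.  The _My_scheduler_*
 * callback names are hypothetical; _Scheduler_Get_context() is assumed to
 * return the Scheduler_Context of the scheduler instance.
 *
 *   void _My_scheduler_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *     _Scheduler_SMP_Block(
 *       context,
 *       thread,
 *       node,
 *       _My_scheduler_Extract_from_ready,
 *       _My_scheduler_Get_highest_ready,
 *       _My_scheduler_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */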
984
985static inline Thread_Control *_Scheduler_SMP_Unblock(
986  Scheduler_Context     *context,
987  Thread_Control        *thread,
988  Scheduler_Node        *node,
989  Scheduler_SMP_Update   update,
990  Scheduler_SMP_Enqueue  enqueue_fifo
991)
992{
993  Scheduler_SMP_Node_state  node_state;
994  bool                      unblock;
995  Thread_Control           *needs_help;
996
997  node_state = _Scheduler_SMP_Node_state( node );
998  unblock = _Scheduler_Unblock_node(
999    context,
1000    thread,
1001    node,
1002    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
1003    _Scheduler_SMP_Release_idle_thread
1004  );
1005
1006  if ( unblock ) {
1007    Priority_Control new_priority;
1008    bool             prepend_it;
1009
1010    new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
1011    (void) prepend_it;
1012
1013    if ( new_priority != _Scheduler_SMP_Node_priority( node ) ) {
1014      ( *update )( context, node, new_priority );
1015    }
1016
1017    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1018      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
1019
1020      needs_help = ( *enqueue_fifo )( context, node, thread );
1021    } else {
1022      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
1023      _Assert( node->sticky_level > 0 );
1024      _Assert( node->idle == NULL );
1025
1026      if ( node->accepts_help == thread ) {
1027        needs_help = thread;
1028      } else {
1029        needs_help = NULL;
1030      }
1031    }
1032  } else {
1033    needs_help = NULL;
1034  }
1035
1036  return needs_help;
1037}
1038
1039static inline void _Scheduler_SMP_Update_priority(
1040  Scheduler_Context               *context,
1041  Thread_Control                  *thread,
1042  Scheduler_Node                  *node,
1043  Scheduler_SMP_Extract            extract_from_ready,
1044  Scheduler_SMP_Update             update,
1045  Scheduler_SMP_Enqueue            enqueue_fifo,
1046  Scheduler_SMP_Enqueue            enqueue_lifo,
1047  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo,
1048  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo,
1049  Scheduler_SMP_Ask_for_help       ask_for_help
1050)
1051{
1052  Priority_Control         new_priority;
1053  bool                     prepend_it;
1054  Scheduler_SMP_Node_state node_state;
1055
1056  new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
1057
1058  if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
1059    if ( _Thread_Is_ready( thread ) ) {
1060      ( *ask_for_help )( context, thread, node );
1061    }
1062
1063    return;
1064  }
1065
1066  node_state = _Scheduler_SMP_Node_state( node );
1067
1068  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1069    _Scheduler_SMP_Extract_from_scheduled( node );
1070
1071    ( *update )( context, node, new_priority );
1072
1073    if ( prepend_it ) {
1074      ( *enqueue_scheduled_lifo )( context, node );
1075    } else {
1076      ( *enqueue_scheduled_fifo )( context, node );
1077    }
1078  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1079    ( *extract_from_ready )( context, node );
1080
1081    ( *update )( context, node, new_priority );
1082
1083    if ( prepend_it ) {
1084      ( *enqueue_lifo )( context, node, NULL );
1085    } else {
1086      ( *enqueue_fifo )( context, node, NULL );
1087    }
1088  } else {
1089    ( *update )( context, node, new_priority );
1090
1091    if ( _Thread_Is_ready( thread ) ) {
1092      ( *ask_for_help )( context, thread, node );
1093    }
1094  }
1095}
1096
1097static inline Thread_Control *_Scheduler_SMP_Yield(
1098  Scheduler_Context               *context,
1099  Thread_Control                  *thread,
1100  Scheduler_Node                  *node,
1101  Scheduler_SMP_Extract            extract_from_ready,
1102  Scheduler_SMP_Enqueue            enqueue_fifo,
1103  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo
1104)
1105{
1106  Thread_Control           *needs_help;
1107  Scheduler_SMP_Node_state  node_state;
1108
1109  node_state = _Scheduler_SMP_Node_state( node );
1110
1111  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1112    _Scheduler_SMP_Extract_from_scheduled( node );
1113
1114    needs_help = ( *enqueue_scheduled_fifo )( context, node );
1115  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1116    ( *extract_from_ready )( context, node );
1117
1118    needs_help = ( *enqueue_fifo )( context, node, NULL );
1119  } else {
1120    needs_help = thread;
1121  }
1122
1123  return needs_help;
1124}
1125
1126static inline void _Scheduler_SMP_Insert_scheduled_lifo(
1127  Scheduler_Context *context,
1128  Scheduler_Node    *node_to_insert
1129)
1130{
1131  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1132
1133  _Chain_Insert_ordered_unprotected(
1134    &self->Scheduled,
1135    &node_to_insert->Node,
1136    _Scheduler_SMP_Insert_priority_lifo_order
1137  );
1138}
1139
1140static inline void _Scheduler_SMP_Insert_scheduled_fifo(
1141  Scheduler_Context *context,
1142  Scheduler_Node    *node_to_insert
1143)
1144{
1145  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1146
1147  _Chain_Insert_ordered_unprotected(
1148    &self->Scheduled,
1149    &node_to_insert->Node,
1150    _Scheduler_SMP_Insert_priority_fifo_order
1151  );
1152}
1153
1154static inline bool _Scheduler_SMP_Ask_for_help(
1155  Scheduler_Context                  *context,
1156  Thread_Control                     *thread,
1157  Scheduler_Node                     *node,
1158  Chain_Node_order                    order,
1159  Scheduler_SMP_Insert                insert_ready,
1160  Scheduler_SMP_Insert                insert_scheduled,
1161  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
1162  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
1163  Scheduler_SMP_Allocate_processor    allocate_processor
1164)
1165{
1166  Scheduler_Node   *lowest_scheduled;
1167  ISR_lock_Context  lock_context;
1168  bool              success;
1169
1170  lowest_scheduled = ( *get_lowest_scheduled )( context, node, order );
1171
1172  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1173
1174  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
1175    Scheduler_SMP_Node_state node_state;
1176
1177    node_state = _Scheduler_SMP_Node_state( node );
1178
1179    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1180      if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
1181        _Thread_Scheduler_cancel_need_for_help(
1182          thread,
1183          _Thread_Get_CPU( thread )
1184        );
1185        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1186        _Thread_Scheduler_release_critical( thread, &lock_context );
1187
1188        _Scheduler_SMP_Preempt(
1189          context,
1190          node,
1191          lowest_scheduled,
1192          allocate_processor
1193        );
1194
1195        ( *insert_scheduled )( context, node );
1196        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
1197
1198        _Scheduler_Release_idle_thread(
1199          context,
1200          lowest_scheduled,
1201          _Scheduler_SMP_Release_idle_thread
1202        );
1203        success = true;
1204      } else {
1205        _Thread_Scheduler_release_critical( thread, &lock_context );
1206        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
1207        ( *insert_ready )( context, node );
1208        success = false;
1209      }
1210    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1211      _Thread_Scheduler_cancel_need_for_help(
1212        thread,
1213        _Thread_Get_CPU( thread )
1214      );
1215      _Scheduler_Discard_idle_thread(
1216        context,
1217        thread,
1218        node,
1219        _Scheduler_SMP_Release_idle_thread
1220      );
1221      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1222      _Thread_Scheduler_release_critical( thread, &lock_context );
1223      success = true;
1224    } else {
1225      _Thread_Scheduler_release_critical( thread, &lock_context );
1226      success = false;
1227    }
1228  } else {
1229    _Thread_Scheduler_release_critical( thread, &lock_context );
1230    success = false;
1231  }
1232
1233  return success;
1234}
1235
1236static inline void _Scheduler_SMP_Reconsider_help_request(
1237  Scheduler_Context     *context,
1238  Thread_Control        *thread,
1239  Scheduler_Node        *node,
1240  Scheduler_SMP_Extract  extract_from_ready
1241)
1242{
1243  ISR_lock_Context lock_context;
1244
1245  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1246
1247  if (
1248    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
1249      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
1250      && node->sticky_level == 1
1251  ) {
1252    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1253    ( *extract_from_ready )( context, node );
1254  }
1255
1256  _Thread_Scheduler_release_critical( thread, &lock_context );
1257}
1258
1259static inline void _Scheduler_SMP_Withdraw_node(
1260  Scheduler_Context                *context,
1261  Thread_Control                   *thread,
1262  Scheduler_Node                   *node,
1263  Thread_Scheduler_state            next_state,
1264  Scheduler_SMP_Extract             extract_from_ready,
1265  Scheduler_SMP_Get_highest_ready   get_highest_ready,
1266  Scheduler_SMP_Move                move_from_ready_to_scheduled,
1267  Scheduler_SMP_Allocate_processor  allocate_processor
1268)
1269{
1270  ISR_lock_Context         lock_context;
1271  Scheduler_SMP_Node_state node_state;
1272
1273  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1274
1275  node_state = _Scheduler_SMP_Node_state( node );
1276  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1277
1278  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1279    Per_CPU_Control *thread_cpu;
1280
1281    thread_cpu = _Thread_Get_CPU( thread );
1282    _Scheduler_Thread_change_state( thread, next_state );
1283    _Thread_Scheduler_release_critical( thread, &lock_context );
1284
1285    _Scheduler_SMP_Extract_from_scheduled( node );
1286    _Scheduler_SMP_Schedule_highest_ready(
1287      context,
1288      node,
1289      thread_cpu,
1290      extract_from_ready,
1291      get_highest_ready,
1292      move_from_ready_to_scheduled,
1293      allocate_processor
1294    );
1295  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1296    _Thread_Scheduler_release_critical( thread, &lock_context );
1297    ( *extract_from_ready )( context, node );
1298  } else {
1299    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
1300    _Thread_Scheduler_release_critical( thread, &lock_context );
1301  }
1302}
1303
1304/** @} */
1305
1306#ifdef __cplusplus
1307}
1308#endif /* __cplusplus */
1309
1310#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */
Note: See TracBrowser for help on using the repository browser.