source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ 5c3d250

Last change: 5c3d250, checked in by Sebastian Huber <sebastian.huber@…> on 07/04/14 at 12:34:23

score: Implement scheduler helping protocol

The following scheduler operations return a thread in need of help:

  • unblock,
  • change priority, and
  • yield.

A thread in need of help is a thread that encounters a scheduler state
change from scheduled to ready, or a thread that cannot be scheduled
during an unblock operation. Such a thread can ask the threads that
depend on resources it owns for help.

Add a new ask-for-help scheduler operation. This operation is used by
_Scheduler_Ask_for_help() to help the threads in need of help returned by
the operations mentioned above. It is also used by
_Scheduler_Thread_change_resource_root() in case the root of a resource
sub-tree changes. A use case is the ownership change of a resource.

In case it is not possible to schedule a thread in need of help, the
corresponding scheduler node is placed into the set of ready scheduler
nodes of the scheduler instance. Once a state change from ready to
scheduled happens for this scheduler node, it may be used to schedule
the thread in need of help.
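
As an illustration of the protocol, here is a minimal sketch of how a concrete SMP scheduler backend might wire its unblock operation to the framework in this file. The _My_SMP_* helper names are hypothetical placeholders for backend-specific callbacks, not functions from this file; _Scheduler_Get_context() is the context accessor from schedulerimpl.h.

Thread_Control *_My_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  /*
   * The returned thread, if not NULL, is a thread in need of help and may
   * be passed on to _Scheduler_Ask_for_help().
   */
  return _Scheduler_SMP_Unblock(
    context,
    thread,
    _My_SMP_Enqueue_fifo,          /* Scheduler_SMP_Enqueue callback */
    _My_SMP_Release_idle_thread    /* Scheduler_Release_idle_thread callback */
  );
}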

/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue_ordered(),
 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance starts
 * with an idle thread assigned to it.  Let's have a look at an example with
 * two idle threads I and J with priority 5.  We also have blocked threads A,
 * B and C with priorities 1, 2 and 3 respectively.  The scheduler nodes are
 * ordered by thread priority from left to right in the diagrams below.  The
 * highest priority node (lowest priority number) is the leftmost node.  Since
 * the processor assignment is independent of the thread priority, the
 * processor indices may move from one state to the other.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * now migrated from processor 1 to processor 0 and thread C still executes on
 * processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *node
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter,
  Chain_Node_order   order
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

typedef Thread_Control *( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Thread_Control    *needs_help
);

typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Thread_Control    *scheduled,
  Thread_Control    *victim
);

static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority <= node_next->priority;
}

static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Scheduler_SMP_Node *node_to_insert =
    (const Scheduler_SMP_Node *) to_insert;
  const Scheduler_SMP_Node *node_next =
    (const Scheduler_SMP_Node *) next;

  return node_to_insert->priority < node_next->priority;
}

static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
  _Chain_Initialize_empty( &self->Idle_threads );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_own_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline void _Scheduler_SMP_Node_initialize(
  Scheduler_SMP_Node *node,
  Thread_Control     *thread
)
{
  _Scheduler_Node_do_initialize( &node->Base, thread );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
}

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_SMP_Node      *node,
  Scheduler_SMP_Node_state new_state
)
{
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
  );

  node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->scheduler_context == context;
}

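/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * The idle thread is removed from the chain of idle threads and its own
 * scheduler node is extracted from the set of ready nodes.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 *
 * @return An idle thread.
 */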
static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
  Scheduler_Context     *context,
  Scheduler_SMP_Extract  extract_from_ready
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *idle = (Thread_Control *)
    _Chain_Get_first_unprotected( &self->Idle_threads );
  Scheduler_Node *own_node = _Scheduler_Thread_get_own_node( idle );

  ( *extract_from_ready )( &self->Base, own_node );

  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );

  return idle;
}

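/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * The idle thread is prepended to the chain of idle threads and its own
 * scheduler node is inserted into the set of ready nodes.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 */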
static inline void _Scheduler_SMP_Release_idle_thread(
  Scheduler_Context    *context,
  Thread_Control       *idle,
  Scheduler_SMP_Insert  insert_ready
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node *own_node = _Scheduler_Thread_get_own_node( idle );

  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
  ( *insert_ready )( context, own_node );
}

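/**
 * @brief Allocates the processor of the victim thread to the scheduled
 *   thread.
 *
 * In case the scheduled thread already executes on a processor owned by this
 * scheduler instance, then it remains there and the previous heir of this
 * processor moves to the processor of the victim thread.  Otherwise the
 * scheduled thread itself becomes the new heir of the victim's processor.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] scheduled_thread The thread to schedule.
 * @param[in] victim_thread The thread to replace.
 */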
static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Thread_Control    *scheduled_thread,
  Thread_Control    *victim_thread
)
{
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      _Assert(
        scheduled_thread->Scheduler.debug_real_cpu->heir != scheduled_thread
      );
      heir = scheduled_thread;
    }
  } else {
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );

  _Scheduler_SMP_Node_change_state(
    _Scheduler_SMP_Node_downcast( scheduled ),
    SCHEDULER_SMP_NODE_SCHEDULED
  );
  _Scheduler_Thread_change_state( scheduled_thread, THREAD_SCHEDULER_SCHEDULED );

  ( *allocate_processor )( context, scheduled_thread, victim_thread );
}

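/**
 * @brief Returns the lowest priority node of the set of scheduled nodes.
 *
 * Since the scheduled chain is ordered by priority, this is simply its last
 * node.  The filter and order parameters are unused by this default
 * implementation.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] filter Unused by this implementation.
 * @param[in] order Unused by this implementation.
 *
 * @return The lowest priority scheduled node.
 */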
static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Chain_Control *scheduled = &self->Scheduled;
  Scheduler_Node *lowest_scheduled =
    (Scheduler_Node *) _Chain_Last( scheduled );

  (void) filter;
  (void) order;

  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );

  return lowest_scheduled;
}

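/**
 * @brief Moves a node into the set of scheduled nodes in exchange for the
 *   lowest priority scheduled node.
 *
 * The lowest priority scheduled node changes to the ready state and releases
 * its processor to the user of the newly scheduled node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] lowest_scheduled The lowest priority node of the set of
 *   scheduled nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @return The user of the replaced node in case it is in need of help,
 *   otherwise NULL.
 */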
static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Release_idle_thread     release_idle_thread
)
{
  Thread_Control *user = _Scheduler_Node_get_user( node );
  Thread_Control *lowest_scheduled_user =
    _Scheduler_Node_get_user( lowest_scheduled );
  Thread_Control *needs_help;
  Thread_Control *idle;

  _Scheduler_SMP_Node_change_state(
    _Scheduler_SMP_Node_downcast( lowest_scheduled ),
    SCHEDULER_SMP_NODE_READY
  );
  _Scheduler_Thread_change_state(
    lowest_scheduled_user,
    THREAD_SCHEDULER_READY
  );

  _Scheduler_Thread_set_node( user, node );

  _Scheduler_SMP_Allocate_processor(
    context,
    node,
    lowest_scheduled,
    allocate_processor
  );

  ( *insert_scheduled )( context, node );
  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

  idle = _Scheduler_Release_idle_thread(
    context,
    lowest_scheduled,
    release_idle_thread
  );
  if ( idle == NULL ) {
    needs_help = lowest_scheduled_user;
  } else {
    needs_help = NULL;
  }

  return needs_help;
}

/**
 * @brief Enqueues a node according to the specified order function.
 *
 * The node must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] needs_help The thread needing help in case the node cannot be
 *   scheduled.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param[in] get_lowest_scheduled Function to select the node from the
 *   scheduled nodes to replace.  It may not be possible to find one; in this
 *   case, a pointer must be returned such that the order function returns
 *   false when this pointer is passed as its second argument.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @return The thread in need of help, or NULL.
 */
static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context                  *context,
  Scheduler_Node                     *node,
  Thread_Control                     *needs_help,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor,
  Scheduler_Release_idle_thread       release_idle_thread
)
{
  Scheduler_Node *lowest_scheduled =
    ( *get_lowest_scheduled )( context, node, order );

  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      allocate_processor,
      release_idle_thread
    );
  } else {
    ( *insert_ready )( context, node );
  }

  return needs_help;
}

/**
 * @brief Enqueues a scheduled node according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node to enqueue.
 * @param[in] order The order function.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 * @param[in] get_idle_thread Function to get an idle thread.
 * @param[in] release_idle_thread Function to release an idle thread.
 */
static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_thread         get_idle_thread,
  Scheduler_Release_idle_thread     release_idle_thread
)
{
  Thread_Control *needs_help;

  while ( true ) {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );

    _Assert( highest_ready != NULL );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
      ( *insert_scheduled )( context, node );

      needs_help = NULL;

      break;
    } else if (
      _Scheduler_Try_to_schedule_node(
        context,
        highest_ready,
        get_idle_thread
      )
    ) {
      Thread_Control *user = _Scheduler_Node_get_user( node );
      Thread_Control *idle;

      _Scheduler_SMP_Node_change_state(
        _Scheduler_SMP_Node_downcast( node ),
        SCHEDULER_SMP_NODE_READY
      );
      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );

      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        node,
        allocate_processor
      );

      ( *insert_ready )( context, node );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      idle = _Scheduler_Release_idle_thread(
        context,
        node,
        release_idle_thread
      );
      if ( idle == NULL ) {
        needs_help = user;
      } else {
        needs_help = NULL;
      }

      break;
    } else {
      _Scheduler_SMP_Node_change_state(
        _Scheduler_SMP_Node_downcast( highest_ready ),
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }

  return needs_help;
}

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Node *node
)
{
  _Chain_Extract_unprotected( &node->Node );
}

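/**
 * @brief Schedules the highest ready node in place of the victim node.
 *
 * Ready nodes that cannot be scheduled change to the blocked state and are
 * extracted from the set of ready nodes until a schedulable node is found.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] victim The node of the thread that releases its processor.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 * @param[in] get_idle_thread Function to get an idle thread.
 */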
static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_thread         get_idle_thread
)
{
  while ( true ) {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    _Assert( highest_ready != NULL );

    if (
      _Scheduler_Try_to_schedule_node(
        context,
        highest_ready,
        get_idle_thread
      )
    ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        victim,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );

      break;
    } else {
      _Scheduler_SMP_Node_change_state(
        _Scheduler_SMP_Node_downcast( highest_ready ),
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_thread         get_idle_thread
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
  bool block = _Scheduler_Block_node(
    context,
    &node->Base,
    is_scheduled,
    get_idle_thread
  );

  if ( block ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    if ( is_scheduled ) {
      _Scheduler_SMP_Extract_from_scheduled( &node->Base );

      _Scheduler_SMP_Schedule_highest_ready(
        context,
        &node->Base,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor,
        get_idle_thread
      );
    } else {
      ( *extract_from_ready )( context, &node->Base );
    }
  }
}

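/**
 * @brief Unblocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] enqueue_fifo Function to enqueue a node in FIFO order.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @return The thread in need of help, or NULL.
 */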
static inline Thread_Control *_Scheduler_SMP_Unblock(
  Scheduler_Context             *context,
  Thread_Control                *thread,
  Scheduler_SMP_Enqueue          enqueue_fifo,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
  bool unblock = _Scheduler_Unblock_node(
    context,
    thread,
    &node->Base,
    is_scheduled,
    release_idle_thread
  );
  Thread_Control *needs_help;

  if ( unblock ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

    needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
  } else {
    needs_help = NULL;
  }

  return needs_help;
}

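/**
 * @brief Changes the priority of a thread.
 *
 * The own node of the thread is extracted from its current set, updated with
 * the new priority and enqueued again.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] new_priority The new priority of the thread.
 * @param[in] prepend_it In case true, the node is enqueued in LIFO order,
 *   otherwise in FIFO order.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] update Function to update the priority of a node.
 * @param[in] enqueue_fifo Function to enqueue a node in FIFO order.
 * @param[in] enqueue_lifo Function to enqueue a node in LIFO order.
 * @param[in] enqueue_scheduled_fifo Function to enqueue a scheduled node in
 *   FIFO order.
 * @param[in] enqueue_scheduled_lifo Function to enqueue a scheduled node in
 *   LIFO order.
 *
 * @return The thread in need of help, or NULL.
 */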
static inline Thread_Control *_Scheduler_SMP_Change_priority(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Priority_Control                 new_priority,
  bool                             prepend_it,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Update             update,
  Scheduler_SMP_Enqueue            enqueue_fifo,
  Scheduler_SMP_Enqueue            enqueue_lifo,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( thread );
  Thread_Control *needs_help;

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( &node->Base );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      needs_help = ( *enqueue_scheduled_lifo )( context, &node->Base );
    } else {
      needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
    }
  } else if ( node->state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, &node->Base );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      needs_help = ( *enqueue_lifo )( context, &node->Base, NULL );
    } else {
      needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
    }
  } else {
    ( *update )( context, &node->Base, new_priority );

    needs_help = NULL;
  }

  return needs_help;
}

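/**
 * @brief Asks for help on behalf of a thread in need of help.
 *
 * The action taken depends on the state of the scheduler node of the thread
 * offering help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread in need of help.
 * @param[in] enqueue_fifo Function to enqueue a node in FIFO order.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @return The next thread in need of help, or NULL.
 */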
static inline Thread_Control *_Scheduler_SMP_Ask_for_help(
  Scheduler_Context                  *context,
  Thread_Control                     *offers_help,
  Thread_Control                     *needs_help,
  Scheduler_SMP_Enqueue               enqueue_fifo,
  Scheduler_Release_idle_thread       release_idle_thread
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
  Thread_Control *next_needs_help = NULL;
  Thread_Control *previous_accepts_help;

  previous_accepts_help = node->Base.accepts_help;
  node->Base.accepts_help = needs_help;

  switch ( node->state ) {
    case SCHEDULER_SMP_NODE_READY:
      next_needs_help =
        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
      break;
    case SCHEDULER_SMP_NODE_SCHEDULED:
      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
        context,
        &node->Base,
        offers_help,
        needs_help,
        previous_accepts_help,
        release_idle_thread
      );
      break;
    case SCHEDULER_SMP_NODE_BLOCKED:
      if (
        _Scheduler_Ask_blocked_node_for_help(
          context,
          &node->Base,
          offers_help,
          needs_help
        )
      ) {
        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

        next_needs_help = ( *enqueue_fifo )(
          context,
          &node->Base,
          needs_help
        );
      }
      break;
  }

  return next_needs_help;
}

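/**
 * @brief Yields the processor of a thread.
 *
 * The node of the thread is extracted from its current set and enqueued
 * again in FIFO order.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] enqueue_fifo Function to enqueue a node in FIFO order.
 * @param[in] enqueue_scheduled_fifo Function to enqueue a scheduled node in
 *   FIFO order.
 *
 * @return The thread in need of help, or NULL.
 */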
static inline Thread_Control *_Scheduler_SMP_Yield(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Enqueue            enqueue_fifo,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
  Thread_Control *needs_help;

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( &node->Base );

    needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
  } else {
    ( *extract_from_ready )( context, &node->Base );

    needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
  }

  return needs_help;
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node,
    _Scheduler_SMP_Insert_priority_fifo_order
  );
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */