source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ cceb19f4

Last change on this file was cceb19f4, checked in by Luca Bonato <lohathe@…> on 11/21/14 at 10:01:34

smp: Fix scheduler helping protocol

New test case for smptests/smpmrsp01.

Fix _Scheduler_Block_node() in case the node is in the
SCHEDULER_HELP_ACTIVE_RIVAL helping state. For example a
rtems_task_suspend() on a task waiting for a MrsP semaphore.

Fix _Scheduler_Unblock_node() in case the node is in the
SCHEDULER_SMP_NODE_READY state. For example a rtems_task_resume() on a
task owning or waiting for a MrsP semaphore.

[9d83f58a]1/**
2 * @file
3 *
4 * @brief SMP Scheduler Implementation
5 *
6 * @ingroup ScoreSchedulerSMP
7 */
8
9/*
[494c2e3]10 * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
[9d83f58a]11 *
12 *  embedded brains GmbH
13 *  Dornierstr. 4
14 *  82178 Puchheim
15 *  Germany
16 *  <rtems@embedded-brains.de>
17 *
18 * The license and distribution terms for this file may be
19 * found in the file LICENSE in this distribution or at
[c499856]20 * http://www.rtems.org/license/LICENSE.
[9d83f58a]21 */
22
23#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25
26#include <rtems/score/schedulersmp.h>
[38b59a6]27#include <rtems/score/assert.h>
[48c4a55]28#include <rtems/score/chainimpl.h>
[38b59a6]29#include <rtems/score/schedulersimpleimpl.h>
[9d83f58a]30
31#ifdef __cplusplus
32extern "C" {
33#endif /* __cplusplus */
34
35/**
36 * @addtogroup ScoreSchedulerSMP
37 *
[c6522a65]38 * The scheduler nodes can be in three states
39 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
[f39f667a]40 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
41 * - @ref SCHEDULER_SMP_NODE_READY.
[c6522a65]42 *
[f39f667a]43 * State transitions are triggered via basic operations
[c0bff5e]44 * - _Scheduler_SMP_Enqueue_ordered(),
45 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
[f39f667a]46 * - _Scheduler_SMP_Block().
[c6522a65]47 *
48 * @dot
49 * digraph {
50 *   node [style="filled"];
51 *
52 *   bs [label="BLOCKED"];
53 *   ss [label="SCHEDULED", fillcolor="green"];
54 *   rs [label="READY", fillcolor="red"];
55 *
56 *   edge [label="enqueue"];
57 *   edge [fontcolor="darkgreen", color="darkgreen"];
58 *
59 *   bs -> ss;
60 *
61 *   edge [fontcolor="red", color="red"];
62 *
63 *   bs -> rs;
64 *
65 *   edge [label="enqueue other"];
66 *
67 *   ss -> rs;
68 *
[f39f667a]69 *   edge [label="block"];
[c6522a65]70 *   edge [fontcolor="black", color="black"];
71 *
[b532bb2c]72 *   ss -> bs;
[c6522a65]73 *   rs -> bs;
74 *
[f39f667a]75 *   edge [label="block other"];
[c6522a65]76 *   edge [fontcolor="darkgreen", color="darkgreen"];
77 *
78 *   rs -> ss;
79 * }
80 * @enddot
81 *
82 * During system initialization, each processor of the scheduler instance starts
83 * with an idle thread assigned to it.  Let's have a look at an example with two
84 * idle threads I and J with priority 5.  We also have blocked threads A, B and
[2d96533]85 * C with priorities 1, 2 and 3 respectively.  The scheduler nodes are ordered
86 * with respect to the thread priority from left to right in the diagrams
87 * below.  The highest priority node (lowest priority number) is the
88 * leftmost node.  Since the processor assignment is independent of the thread
89 * priority, the processor indices may move from one state diagram to the next.
[c6522a65]90 *
91 * @dot
92 * digraph {
93 *   node [style="filled"];
94 *   edge [dir="none"];
95 *   subgraph {
96 *     rank = same;
97 *
98 *     i [label="I (5)", fillcolor="green"];
99 *     j [label="J (5)", fillcolor="green"];
100 *     a [label="A (1)"];
101 *     b [label="B (2)"];
102 *     c [label="C (3)"];
103 *     i -> j;
104 *   }
105 *
106 *   subgraph {
107 *     rank = same;
108 *
109 *     p0 [label="PROCESSOR 0", shape="box"];
110 *     p1 [label="PROCESSOR 1", shape="box"];
111 *   }
112 *
113 *   i -> p0;
114 *   j -> p1;
115 * }
116 * @enddot
117 *
118 * Let's start A.  For this, an enqueue operation is performed.
119 *
120 * @dot
121 * digraph {
122 *   node [style="filled"];
123 *   edge [dir="none"];
124 *
125 *   subgraph {
126 *     rank = same;
127 *
128 *     i [label="I (5)", fillcolor="green"];
129 *     j [label="J (5)", fillcolor="red"];
130 *     a [label="A (1)", fillcolor="green"];
131 *     b [label="B (2)"];
132 *     c [label="C (3)"];
133 *     a -> i;
134 *   }
135 *
136 *   subgraph {
137 *     rank = same;
138 *
139 *     p0 [label="PROCESSOR 0", shape="box"];
140 *     p1 [label="PROCESSOR 1", shape="box"];
141 *   }
142 *
143 *   i -> p0;
144 *   a -> p1;
145 * }
146 * @enddot
147 *
148 * Let's start C.
149 *
150 * @dot
151 * digraph {
152 *   node [style="filled"];
153 *   edge [dir="none"];
154 *
155 *   subgraph {
156 *     rank = same;
157 *
158 *     a [label="A (1)", fillcolor="green"];
159 *     c [label="C (3)", fillcolor="green"];
160 *     i [label="I (5)", fillcolor="red"];
161 *     j [label="J (5)", fillcolor="red"];
162 *     b [label="B (2)"];
163 *     a -> c;
164 *     i -> j;
165 *   }
166 *
167 *   subgraph {
168 *     rank = same;
169 *
170 *     p0 [label="PROCESSOR 0", shape="box"];
171 *     p1 [label="PROCESSOR 1", shape="box"];
172 *   }
173 *
174 *   c -> p0;
175 *   a -> p1;
176 * }
177 * @enddot
178 *
179 * Let's start B.
180 *
181 * @dot
182 * digraph {
183 *   node [style="filled"];
184 *   edge [dir="none"];
185 *
186 *   subgraph {
187 *     rank = same;
188 *
189 *     a [label="A (1)", fillcolor="green"];
190 *     b [label="B (2)", fillcolor="green"];
191 *     c [label="C (3)", fillcolor="red"];
192 *     i [label="I (5)", fillcolor="red"];
193 *     j [label="J (5)", fillcolor="red"];
194 *     a -> b;
195 *     c -> i -> j;
196 *   }
197 *
198 *   subgraph {
199 *     rank = same;
200 *
201 *     p0 [label="PROCESSOR 0", shape="box"];
202 *     p1 [label="PROCESSOR 1", shape="box"];
203 *   }
204 *
205 *   b -> p0;
206 *   a -> p1;
207 * }
208 * @enddot
209 *
[f39f667a]210 * Let's change the priority of thread A to 4.
[c6522a65]211 *
212 * @dot
213 * digraph {
214 *   node [style="filled"];
215 *   edge [dir="none"];
216 *
217 *   subgraph {
218 *     rank = same;
219 *
220 *     b [label="B (2)", fillcolor="green"];
221 *     c [label="C (3)", fillcolor="green"];
222 *     a [label="A (4)", fillcolor="red"];
223 *     i [label="I (5)", fillcolor="red"];
224 *     j [label="J (5)", fillcolor="red"];
225 *     b -> c;
226 *     a -> i -> j;
227 *   }
228 *
229 *   subgraph {
230 *     rank = same;
231 *
232 *     p0 [label="PROCESSOR 0", shape="box"];
233 *     p1 [label="PROCESSOR 1", shape="box"];
234 *   }
235 *
236 *   b -> p0;
237 *   c -> p1;
238 * }
239 * @enddot
240 *
[f39f667a]241 * Now perform a blocking operation with thread B.  Note that thread A has now
242 * migrated from processor 1 to processor 0 and thread C still executes on
243 * processor 1.
[c6522a65]244 *
245 * @dot
246 * digraph {
247 *   node [style="filled"];
248 *   edge [dir="none"];
249 *
250 *   subgraph {
251 *     rank = same;
252 *
253 *     c [label="C (3)", fillcolor="green"];
[f39f667a]254 *     a [label="A (4)", fillcolor="green"];
[c6522a65]255 *     i [label="I (5)", fillcolor="red"];
256 *     j [label="J (5)", fillcolor="red"];
[f39f667a]257 *     b [label="B (2)"];
258 *     c -> a;
[c6522a65]259 *     i -> j;
260 *   }
261 *
262 *   subgraph {
263 *     rank = same;
264 *
265 *     p0 [label="PROCESSOR 0", shape="box"];
266 *     p1 [label="PROCESSOR 1", shape="box"];
267 *   }
268 *
[f39f667a]269 *   a -> p0;
[c6522a65]270 *   c -> p1;
271 * }
272 * @enddot
273 *
[9d83f58a]274 * @{
275 */
276
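/*
 * A minimal sketch of how the walkthrough above maps onto the generic
 * operations defined later in this header.  The callback arguments
 * (enqueue_fifo, extract_from_ready, and so on) stand for back-end specific
 * functions and are placeholders only; the Thread_Control pointer returned by
 * the unblock and change-priority operations (a thread that may still need
 * help) is ignored here.
 *
 *   // "Start A" (and likewise C and B): the thread becomes ready and its
 *   // node is enqueued.
 *   _Scheduler_SMP_Unblock( context, thread_A, enqueue_fifo );
 *
 *   // "Change the priority of thread A to 4".
 *   _Scheduler_SMP_Change_priority(
 *     context, thread_A, 4, false,
 *     extract_from_ready, update, enqueue_fifo, enqueue_lifo,
 *     enqueue_scheduled_fifo, enqueue_scheduled_lifo
 *   );
 *
 *   // "Block thread B".
 *   _Scheduler_SMP_Block(
 *     context, thread_B,
 *     extract_from_ready, get_highest_ready,
 *     move_from_ready_to_scheduled, allocate_processor
 *   );
 */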
[8f0c7a46]277typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
[238629f]278  Scheduler_Context *context,
[8f0c7a46]279  Scheduler_Node    *node
[238629f]280);
281
[8f0c7a46]282typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
[238629f]283  Scheduler_Context *context,
[8f0c7a46]284  Scheduler_Node    *filter,
[238629f]285  Chain_Node_order   order
[48c4a55]286);
287
288typedef void ( *Scheduler_SMP_Extract )(
[3730a07f]289  Scheduler_Context *context,
[8f0c7a46]290  Scheduler_Node    *node_to_extract
[48c4a55]291);
292
293typedef void ( *Scheduler_SMP_Insert )(
[3730a07f]294  Scheduler_Context *context,
[8f0c7a46]295  Scheduler_Node    *node_to_insert
[48c4a55]296);
297
298typedef void ( *Scheduler_SMP_Move )(
[3730a07f]299  Scheduler_Context *context,
[8f0c7a46]300  Scheduler_Node    *node_to_move
[48c4a55]301);
302
[f39f667a]303typedef void ( *Scheduler_SMP_Update )(
304  Scheduler_Context *context,
[8f0c7a46]305  Scheduler_Node    *node_to_update,
[d9b54da]306  Priority_Control   new_priority
[f39f667a]307);
308
[8568341]309typedef Thread_Control *( *Scheduler_SMP_Enqueue )(
310  Scheduler_Context *context,
311  Scheduler_Node    *node_to_enqueue,
312  Thread_Control    *needs_help
313);
314
315typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
[f39f667a]316  Scheduler_Context *context,
[8f0c7a46]317  Scheduler_Node    *node_to_enqueue
[f39f667a]318);
319
[238629f]320typedef void ( *Scheduler_SMP_Allocate_processor )(
[8f0c7a46]321  Scheduler_Context *context,
[19e41767]322  Thread_Control    *scheduled,
323  Thread_Control    *victim
[238629f]324);
325
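/*
 * A minimal sketch of a back-end callback matching the Scheduler_SMP_Extract
 * typedef above.  The context type and function name are hypothetical and
 * assume a back-end that keeps all ready nodes on a single chain; real
 * schedulers supply their own context layout and ready-queue structure.
 *
 *   typedef struct {
 *     Scheduler_SMP_Context Base;
 *     Chain_Control         Ready;
 *   } Scheduler_example_Context;
 *
 *   static void _Scheduler_example_Extract_from_ready(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node_to_extract
 *   )
 *   {
 *     (void) context;
 *
 *     // A chain node can be extracted without knowing its chain.
 *     _Chain_Extract_unprotected( &node_to_extract->Node );
 *   }
 */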
[8f0c7a46]326static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
327  const Chain_Node *to_insert,
328  const Chain_Node *next
329)
330{
331  const Scheduler_SMP_Node *node_to_insert =
332    (const Scheduler_SMP_Node *) to_insert;
333  const Scheduler_SMP_Node *node_next =
334    (const Scheduler_SMP_Node *) next;
335
336  return node_to_insert->priority <= node_next->priority;
337}
338
339static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
340  const Chain_Node *to_insert,
341  const Chain_Node *next
342)
343{
344  const Scheduler_SMP_Node *node_to_insert =
345    (const Scheduler_SMP_Node *) to_insert;
346  const Scheduler_SMP_Node *node_next =
347    (const Scheduler_SMP_Node *) next;
348
349  return node_to_insert->priority < node_next->priority;
350}
351
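/*
 * For equal priorities the LIFO order above (<=) places the node to insert
 * before already queued nodes of the same priority, whereas the FIFO order
 * (<) places it behind them.  This is what distinguishes prepend-style from
 * append-style enqueue operations built on these order functions.
 */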
[3730a07f]352static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
353  Scheduler_Context *context
354)
355{
356  return (Scheduler_SMP_Context *) context;
357}
358
[494c2e3]359static inline void _Scheduler_SMP_Initialize(
[e1598a6]360  Scheduler_SMP_Context *self
[494c2e3]361)
[9d83f58a]362{
[494c2e3]363  _Chain_Initialize_empty( &self->Scheduled );
[5c3d250]364  _Chain_Initialize_empty( &self->Idle_threads );
[9d83f58a]365}
366
[08d9760]367static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
[beab7329]368  Thread_Control *thread
369)
370{
[08d9760]371  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
[beab7329]372}
373
[5c3d250]374static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
375  Thread_Control *thread
376)
377{
378  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_own_node( thread );
379}
380
[8f0c7a46]381static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
382  Scheduler_Node *node
383)
384{
385  return (Scheduler_SMP_Node *) node;
386}
387
[beab7329]388static inline void _Scheduler_SMP_Node_initialize(
[8f0c7a46]389  Scheduler_SMP_Node *node,
390  Thread_Control     *thread
[beab7329]391)
392{
[8f0c7a46]393  _Scheduler_Node_do_initialize( &node->Base, thread );
[beab7329]394  node->state = SCHEDULER_SMP_NODE_BLOCKED;
395}
396
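/*
 * A minimal sketch of the usual back-end pattern: the scheduler-specific node
 * embeds Scheduler_SMP_Node as its first member and delegates initialization
 * to _Scheduler_SMP_Node_initialize().  The example names are hypothetical.
 *
 *   typedef struct {
 *     Scheduler_SMP_Node Base;
 *     // back-end specific fields follow here
 *   } Scheduler_example_Node;
 *
 *   static void _Scheduler_example_Node_initialize(
 *     Scheduler_example_Node *node,
 *     Thread_Control         *thread
 *   )
 *   {
 *     _Scheduler_SMP_Node_initialize( &node->Base, thread );
 *   }
 */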
[8f0c7a46]397static inline void _Scheduler_SMP_Node_update_priority(
398  Scheduler_SMP_Node *node,
399  Priority_Control    new_priority
400)
401{
402  node->priority = new_priority;
403}
404
[f39f667a]405extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];
[beab7329]406
407static inline void _Scheduler_SMP_Node_change_state(
[d9b54da]408  Scheduler_SMP_Node      *node,
[beab7329]409  Scheduler_SMP_Node_state new_state
410)
411{
412  _Assert(
413    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
414  );
415
416  node->state = new_state;
417}
418
[38b59a6]419static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
[8f0c7a46]420  const Scheduler_Context *context,
421  const Per_CPU_Control   *cpu
[38b59a6]422)
423{
[8f0c7a46]424  return cpu->scheduler_context == context;
[38b59a6]425}
426
[5c3d250]427static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
[27783f6]428  Scheduler_Context *context
[5c3d250]429)
430{
431  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
432  Thread_Control *idle = (Thread_Control *)
433    _Chain_Get_first_unprotected( &self->Idle_threads );
434
435  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
436
437  return idle;
438}
439
440static inline void _Scheduler_SMP_Release_idle_thread(
[27783f6]441  Scheduler_Context *context,
442  Thread_Control    *idle
[5c3d250]443)
444{
445  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
446
447  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
448}
449
[19e41767]450static inline void _Scheduler_SMP_Allocate_processor_lazy(
[8f0c7a46]451  Scheduler_Context *context,
[19e41767]452  Thread_Control    *scheduled_thread,
453  Thread_Control    *victim_thread
[fc2ad63]454)
455{
[8f0c7a46]456  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
457  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
[38b59a6]458  Per_CPU_Control *cpu_self = _Per_CPU_Get();
[fc2ad63]459  Thread_Control *heir;
460
[38b59a6]461  _Assert( _ISR_Get_level() != 0 );
[fc2ad63]462
[8f0c7a46]463  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
464    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
465      heir = scheduled_cpu->heir;
[835b88b]466      _Thread_Dispatch_update_heir(
[8f0c7a46]467        cpu_self,
468        scheduled_cpu,
469        scheduled_thread
470      );
[38b59a6]471    } else {
472      /* We have to force a migration to our processor set */
[8f0c7a46]473      _Assert(
474        scheduled_thread->Scheduler.debug_real_cpu->heir != scheduled_thread
475      );
476      heir = scheduled_thread;
[38b59a6]477    }
[fc2ad63]478  } else {
[8f0c7a46]479    heir = scheduled_thread;
[fc2ad63]480  }
481
[8f0c7a46]482  if ( heir != victim_thread ) {
483    _Thread_Set_CPU( heir, victim_cpu );
[835b88b]484    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
[fc2ad63]485  }
486}
487
[19e41767]488static inline void _Scheduler_SMP_Allocate_processor(
489  Scheduler_Context                *context,
490  Scheduler_Node                   *scheduled,
491  Scheduler_Node                   *victim,
492  Scheduler_SMP_Allocate_processor  allocate_processor
493)
494{
[ac532f3]495  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
496  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
[19e41767]497
498  _Scheduler_SMP_Node_change_state(
499    _Scheduler_SMP_Node_downcast( scheduled ),
500    SCHEDULER_SMP_NODE_SCHEDULED
501  );
[5c3d250]502  _Scheduler_Thread_change_state( scheduled_thread, THREAD_SCHEDULER_SCHEDULED );
[19e41767]503
504  ( *allocate_processor )( context, scheduled_thread, victim_thread );
505}
506
[8f0c7a46]507static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
[238629f]508  Scheduler_Context *context,
[8f0c7a46]509  Scheduler_Node    *filter,
[238629f]510  Chain_Node_order   order
[aea4a91]511)
512{
[238629f]513  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
[494c2e3]514  Chain_Control *scheduled = &self->Scheduled;
[8f0c7a46]515  Scheduler_Node *lowest_scheduled =
516    (Scheduler_Node *) _Chain_Last( scheduled );
[aea4a91]517
[8f0c7a46]518  (void) filter;
519  (void) order;
[aea4a91]520
[5c632c4]521  _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );
[238629f]522
[8f0c7a46]523  return lowest_scheduled;
[aea4a91]524}
525
[5c3d250]526static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
527  Scheduler_Context                *context,
528  Scheduler_Node                   *node,
529  Scheduler_Node                   *lowest_scheduled,
530  Scheduler_SMP_Insert              insert_scheduled,
531  Scheduler_SMP_Move                move_from_scheduled_to_ready,
[27783f6]532  Scheduler_SMP_Allocate_processor  allocate_processor
[5c3d250]533)
534{
535  Thread_Control *user = _Scheduler_Node_get_user( node );
536  Thread_Control *lowest_scheduled_user =
537    _Scheduler_Node_get_user( lowest_scheduled );
538  Thread_Control *needs_help;
539  Thread_Control *idle;
540
541  _Scheduler_SMP_Node_change_state(
542    _Scheduler_SMP_Node_downcast( lowest_scheduled ),
543    SCHEDULER_SMP_NODE_READY
544  );
545  _Scheduler_Thread_change_state(
546    lowest_scheduled_user,
547    THREAD_SCHEDULER_READY
548  );
549
550  _Scheduler_Thread_set_node( user, node );
551
552  _Scheduler_SMP_Allocate_processor(
553    context,
554    node,
555    lowest_scheduled,
556    allocate_processor
557  );
558
559  ( *insert_scheduled )( context, node );
560  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
561
562  idle = _Scheduler_Release_idle_thread(
563    context,
564    lowest_scheduled,
[27783f6]565    _Scheduler_SMP_Release_idle_thread
[5c3d250]566  );
567  if ( idle == NULL ) {
568    needs_help = lowest_scheduled_user;
569  } else {
570    needs_help = NULL;
571  }
572
573  return needs_help;
574}
575
[c6522a65]576/**
[8f0c7a46]577 * @brief Enqueues a node according to the specified order function.
[c6522a65]578 *
[8f0c7a46]579 * The node must not be in the scheduled state.
[c0bff5e]580 *
[c6522a65]581 * @param[in] context The scheduler instance context.
[8f0c7a46]582 * @param[in] node The node to enqueue.
[8568341]583 * @param[in] needs_help The thread needing help in case the node cannot be
584 *   scheduled.
[c6522a65]585 * @param[in] order The order function.
586 * @param[in] insert_ready Function to insert a node into the set of ready
[238629f]587 *   nodes.
[c6522a65]588 * @param[in] insert_scheduled Function to insert a node into the set of
[238629f]589 *   scheduled nodes.
[c6522a65]590 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
[238629f]591 *   of scheduled nodes to the set of ready nodes.
[8f0c7a46]592 * @param[in] get_lowest_scheduled Function to select the node from the
[82df6f3]593 *   scheduled nodes to replace.  It may not be possible to find one; in this
594 *   case a pointer must be returned so that the order function returns false
595 *   if this pointer is passed as the second argument to the order function.
[8f0c7a46]596 * @param[in] allocate_processor Function to allocate a processor to a node
[238629f]597 *   based on the rules of the scheduler.
[c6522a65]598 */
[8568341]599static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
[d9b54da]600  Scheduler_Context                  *context,
[8f0c7a46]601  Scheduler_Node                     *node,
[8568341]602  Thread_Control                     *needs_help,
[d9b54da]603  Chain_Node_order                    order,
[238629f]604  Scheduler_SMP_Insert                insert_ready,
605  Scheduler_SMP_Insert                insert_scheduled,
606  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
607  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
[27783f6]608  Scheduler_SMP_Allocate_processor    allocate_processor
[48c4a55]609)
[c0bff5e]610{
[8f0c7a46]611  Scheduler_Node *lowest_scheduled =
612    ( *get_lowest_scheduled )( context, node, order );
[c0bff5e]613
[8f0c7a46]614  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
[5c3d250]615    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
[19e41767]616      context,
617      node,
618      lowest_scheduled,
[5c3d250]619      insert_scheduled,
620      move_from_scheduled_to_ready,
[27783f6]621      allocate_processor
[19e41767]622    );
[c0bff5e]623  } else {
[8f0c7a46]624    ( *insert_ready )( context, node );
[c0bff5e]625  }
[8568341]626
627  return needs_help;
[c0bff5e]628}
629
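/*
 * A minimal sketch of how a back-end typically wraps
 * _Scheduler_SMP_Enqueue_ordered() once per insertion discipline.  The
 * _Scheduler_example_* callbacks are hypothetical back-end functions; the
 * remaining helpers are the ones defined in this header.
 *
 *   static Thread_Control *_Scheduler_example_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node,
 *     Thread_Control    *needs_help
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       node,
 *       needs_help,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _Scheduler_example_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _Scheduler_example_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */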
630/**
[8f0c7a46]631 * @brief Enqueues a scheduled node according to the specified order
[c0bff5e]632 * function.
633 *
634 * @param[in] context The scheduler instance context.
[8f0c7a46]635 * @param[in] node The node to enqueue.
[c0bff5e]636 * @param[in] order The order function.
[5c3d250]637 * @param[in] extract_from_ready Function to extract a node from the set of
638 *   ready nodes.
[c0bff5e]639 * @param[in] get_highest_ready Function to get the highest ready node.
640 * @param[in] insert_ready Function to insert a node into the set of ready
[238629f]641 *   nodes.
[c0bff5e]642 * @param[in] insert_scheduled Function to insert a node into the set of
[238629f]643 *   scheduled nodes.
[c0bff5e]644 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
[238629f]645 *   of ready nodes to the set of scheduled nodes.
[8f0c7a46]646 * @param[in] allocate_processor Function to allocate a processor to a node
[238629f]647 *   based on the rules of the scheduler.
[c0bff5e]648 */
[8568341]649static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
[d9b54da]650  Scheduler_Context                *context,
[8f0c7a46]651  Scheduler_Node                   *node,
[238629f]652  Chain_Node_order                  order,
[5c3d250]653  Scheduler_SMP_Extract             extract_from_ready,
[238629f]654  Scheduler_SMP_Get_highest_ready   get_highest_ready,
655  Scheduler_SMP_Insert              insert_ready,
656  Scheduler_SMP_Insert              insert_scheduled,
657  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]658  Scheduler_SMP_Allocate_processor  allocate_processor
[c0bff5e]659)
[48c4a55]660{
[8568341]661  Thread_Control *needs_help;
[48c4a55]662
[5c3d250]663  while ( true ) {
664    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );
665
666    /*
667     * The node has been extracted from the scheduled chain.  We have to place
668     * it now on the scheduled or ready set.
669     */
670    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
671      ( *insert_scheduled )( context, node );
672
673      needs_help = NULL;
674
675      break;
676    } else if (
677      _Scheduler_Try_to_schedule_node(
678        context,
679        highest_ready,
[27783f6]680        _Scheduler_SMP_Get_idle_thread
[5c3d250]681      )
682    ) {
683      Thread_Control *user = _Scheduler_Node_get_user( node );
684      Thread_Control *idle;
685
686      _Scheduler_SMP_Node_change_state(
687        _Scheduler_SMP_Node_downcast( node ),
688        SCHEDULER_SMP_NODE_READY
689      );
690      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
[c0bff5e]691
[5c3d250]692      _Scheduler_SMP_Allocate_processor(
693        context,
694        highest_ready,
695        node,
696        allocate_processor
697      );
[8568341]698
[5c3d250]699      ( *insert_ready )( context, node );
700      ( *move_from_ready_to_scheduled )( context, highest_ready );
[19e41767]701
[5c3d250]702      idle = _Scheduler_Release_idle_thread(
703        context,
704        node,
[27783f6]705        _Scheduler_SMP_Release_idle_thread
[5c3d250]706      );
707      if ( idle == NULL ) {
708        needs_help = user;
709      } else {
710        needs_help = NULL;
711      }
[19e41767]712
[5c3d250]713      break;
714    } else {
715      _Scheduler_SMP_Node_change_state(
716        _Scheduler_SMP_Node_downcast( highest_ready ),
717        SCHEDULER_SMP_NODE_BLOCKED
718      );
[8568341]719
[5c3d250]720      ( *extract_from_ready )( context, highest_ready );
721    }
[48c4a55]722  }
[8568341]723
724  return needs_help;
[48c4a55]725}
726
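/*
 * A minimal sketch of the scheduled-node variant, bound the same way.  It is
 * what a back-end calls from its change-priority and yield paths when the
 * node is currently in the set of scheduled nodes.  The _Scheduler_example_*
 * callbacks are hypothetical.
 *
 *   static Thread_Control *_Scheduler_example_Enqueue_scheduled_fifo(
 *     Scheduler_Context *context,
 *     Scheduler_Node    *node
 *   )
 *   {
 *     return _Scheduler_SMP_Enqueue_scheduled_ordered(
 *       context,
 *       node,
 *       _Scheduler_SMP_Insert_priority_fifo_order,
 *       _Scheduler_example_Extract_from_ready,
 *       _Scheduler_example_Get_highest_ready,
 *       _Scheduler_example_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _Scheduler_example_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */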
[c0bff5e]727static inline void _Scheduler_SMP_Extract_from_scheduled(
[8f0c7a46]728  Scheduler_Node *node
[c0bff5e]729)
[f39f667a]730{
[8f0c7a46]731  _Chain_Extract_unprotected( &node->Node );
[f39f667a]732}
733
[48c4a55]734static inline void _Scheduler_SMP_Schedule_highest_ready(
[d9b54da]735  Scheduler_Context                *context,
[8f0c7a46]736  Scheduler_Node                   *victim,
[5c3d250]737  Scheduler_SMP_Extract             extract_from_ready,
[d9b54da]738  Scheduler_SMP_Get_highest_ready   get_highest_ready,
739  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]740  Scheduler_SMP_Allocate_processor  allocate_processor
[48c4a55]741)
742{
[5c3d250]743  while ( true ) {
744    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
745
746    if (
747      _Scheduler_Try_to_schedule_node(
748        context,
749        highest_ready,
[27783f6]750        _Scheduler_SMP_Get_idle_thread
[5c3d250]751      )
752    ) {
753      _Scheduler_SMP_Allocate_processor(
754        context,
755        highest_ready,
756        victim,
757        allocate_processor
758      );
[48c4a55]759
[5c3d250]760      ( *move_from_ready_to_scheduled )( context, highest_ready );
761
762      break;
763    } else {
764      _Scheduler_SMP_Node_change_state(
765        _Scheduler_SMP_Node_downcast( highest_ready ),
766        SCHEDULER_SMP_NODE_BLOCKED
767      );
[19e41767]768
[5c3d250]769      ( *extract_from_ready )( context, highest_ready );
770    }
771  }
[48c4a55]772}
773
[c6522a65]774/**
[f39f667a]775 * @brief Blocks a thread.
[c6522a65]776 *
777 * @param[in] context The scheduler instance context.
778 * @param[in] thread The thread of the scheduling operation.
[f39f667a]779 * @param[in] extract_from_ready Function to extract a node from the set of
[5c3d250]780 *   ready nodes.
[c6522a65]781 * @param[in] get_highest_ready Function to get the highest ready node.
782 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
[5c3d250]783 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
[c6522a65]784 */
[f39f667a]785static inline void _Scheduler_SMP_Block(
[d9b54da]786  Scheduler_Context                *context,
787  Thread_Control                   *thread,
788  Scheduler_SMP_Extract             extract_from_ready,
789  Scheduler_SMP_Get_highest_ready   get_highest_ready,
790  Scheduler_SMP_Move                move_from_ready_to_scheduled,
[27783f6]791  Scheduler_SMP_Allocate_processor  allocate_processor
[48c4a55]792)
793{
[08d9760]794  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
[f39f667a]795  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
[cceb19f4]796  bool block;
797
798  _Assert( is_scheduled || node->state == SCHEDULER_SMP_NODE_READY );
799
800  block = _Scheduler_Block_node(
[5c3d250]801    context,
[cceb19f4]802    thread,
[5c3d250]803    &node->Base,
804    is_scheduled,
[27783f6]805    _Scheduler_SMP_Get_idle_thread
[5c3d250]806  );
807  if ( block ) {
808    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
[beab7329]809
[5c3d250]810    if ( is_scheduled ) {
811      _Scheduler_SMP_Extract_from_scheduled( &node->Base );
[48c4a55]812
[5c3d250]813      _Scheduler_SMP_Schedule_highest_ready(
814        context,
815        &node->Base,
816        extract_from_ready,
817        get_highest_ready,
818        move_from_ready_to_scheduled,
[27783f6]819        allocate_processor
[5c3d250]820      );
821    } else {
822      ( *extract_from_ready )( context, &node->Base );
823    }
[48c4a55]824  }
825}
826
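/*
 * A minimal sketch of a back-end block operation delegating to
 * _Scheduler_SMP_Block().  The _Scheduler_example_* callbacks are
 * hypothetical, and _Scheduler_Get_context() is assumed to be available from
 * <rtems/score/schedulerimpl.h>.
 *
 *   static void _Scheduler_example_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *thread
 *   )
 *   {
 *     Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *     _Scheduler_SMP_Block(
 *       context,
 *       thread,
 *       _Scheduler_example_Extract_from_ready,
 *       _Scheduler_example_Get_highest_ready,
 *       _Scheduler_example_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor_lazy
 *     );
 *   }
 */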
[8568341]827static inline Thread_Control *_Scheduler_SMP_Unblock(
[5c3d250]828  Scheduler_Context             *context,
829  Thread_Control                *thread,
[27783f6]830  Scheduler_SMP_Enqueue          enqueue_fifo
[c0bff5e]831)
832{
[08d9760]833  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
[5c3d250]834  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
835  bool unblock = _Scheduler_Unblock_node(
836    context,
837    thread,
838    &node->Base,
839    is_scheduled,
[27783f6]840    _Scheduler_SMP_Release_idle_thread
[5c3d250]841  );
842  Thread_Control *needs_help;
[c0bff5e]843
[5c3d250]844  if ( unblock ) {
[cceb19f4]845    if ( node->state != SCHEDULER_SMP_NODE_READY ) {
846      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
847
848      needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
849    } else {
850      _Assert( node->state == SCHEDULER_SMP_NODE_READY );
851      _Assert( node->Base.idle == NULL );
[c0bff5e]852
[cceb19f4]853      if ( node->Base.accepts_help == thread ) {
854        _Assert( node->Base.help_state == SCHEDULER_HELP_ACTIVE_OWNER );
855        needs_help = thread;
856      } else {
857        _Assert( node->Base.help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
858        needs_help = NULL;
859      }
860    }
[5c3d250]861  } else {
862    needs_help = NULL;
863  }
864
865  return needs_help;
[c0bff5e]866}
867
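/*
 * A minimal sketch of the matching unblock path: the back-end passes its FIFO
 * enqueue wrapper and forwards the possible "needs help" thread to the
 * caller.  The _Scheduler_example_* names are hypothetical, and
 * _Scheduler_Get_context() is assumed to come from
 * <rtems/score/schedulerimpl.h>.
 *
 *   static Thread_Control *_Scheduler_example_Unblock(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *thread
 *   )
 *   {
 *     Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 *
 *     return _Scheduler_SMP_Unblock(
 *       context,
 *       thread,
 *       _Scheduler_example_Enqueue_fifo
 *     );
 *   }
 */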
[8568341]868static inline Thread_Control *_Scheduler_SMP_Change_priority(
869  Scheduler_Context               *context,
870  Thread_Control                  *thread,
871  Priority_Control                 new_priority,
872  bool                             prepend_it,
873  Scheduler_SMP_Extract            extract_from_ready,
874  Scheduler_SMP_Update             update,
875  Scheduler_SMP_Enqueue            enqueue_fifo,
876  Scheduler_SMP_Enqueue            enqueue_lifo,
877  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo,
878  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo
[48c4a55]879)
880{
[5c3d250]881  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( thread );
[8568341]882  Thread_Control *needs_help;
[a336d51]883
[c0bff5e]884  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
[8f0c7a46]885    _Scheduler_SMP_Extract_from_scheduled( &node->Base );
[c0bff5e]886
887    ( *update )( context, &node->Base, new_priority );
888
889    if ( prepend_it ) {
[8568341]890      needs_help = ( *enqueue_scheduled_lifo )( context, &node->Base );
[c0bff5e]891    } else {
[8568341]892      needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
[c0bff5e]893    }
[5c3d250]894  } else if ( node->state == SCHEDULER_SMP_NODE_READY ) {
[8f0c7a46]895    ( *extract_from_ready )( context, &node->Base );
[48c4a55]896
[c0bff5e]897    ( *update )( context, &node->Base, new_priority );
[f39f667a]898
[c0bff5e]899    if ( prepend_it ) {
[8568341]900      needs_help = ( *enqueue_lifo )( context, &node->Base, NULL );
[c0bff5e]901    } else {
[8568341]902      needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
[c0bff5e]903    }
[5c3d250]904  } else {
905    ( *update )( context, &node->Base, new_priority );
906
907    needs_help = NULL;
[f39f667a]908  }
[8568341]909
910  return needs_help;
[48c4a55]911}
912
[5c3d250]913static inline Thread_Control *_Scheduler_SMP_Ask_for_help(
914  Scheduler_Context                  *context,
915  Thread_Control                     *offers_help,
916  Thread_Control                     *needs_help,
[27783f6]917  Scheduler_SMP_Enqueue               enqueue_fifo
[5c3d250]918)
919{
920  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
921  Thread_Control *next_needs_help = NULL;
922  Thread_Control *previous_accepts_help;
923
924  previous_accepts_help = node->Base.accepts_help;
925  node->Base.accepts_help = needs_help;
926
927  switch ( node->state ) {
928    case SCHEDULER_SMP_NODE_READY:
929      next_needs_help =
930        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
931      break;
932    case SCHEDULER_SMP_NODE_SCHEDULED:
933      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
934        context,
935        &node->Base,
936        offers_help,
937        needs_help,
938        previous_accepts_help,
[27783f6]939        _Scheduler_SMP_Release_idle_thread
[5c3d250]940      );
941      break;
942    case SCHEDULER_SMP_NODE_BLOCKED:
943      if (
944        _Scheduler_Ask_blocked_node_for_help(
945          context,
946          &node->Base,
947          offers_help,
948          needs_help
949        )
950      ) {
951        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
952
953        next_needs_help = ( *enqueue_fifo )(
954          context,
955          &node->Base,
956          needs_help
957        );
958      }
959      break;
960  }
961
962  return next_needs_help;
963}
964
[8568341]965static inline Thread_Control *_Scheduler_SMP_Yield(
966  Scheduler_Context               *context,
967  Thread_Control                  *thread,
968  Scheduler_SMP_Extract            extract_from_ready,
969  Scheduler_SMP_Enqueue            enqueue_fifo,
970  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo
[701dd96f]971)
972{
[08d9760]973  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
[8568341]974  Thread_Control *needs_help;
[701dd96f]975
976  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
[8f0c7a46]977    _Scheduler_SMP_Extract_from_scheduled( &node->Base );
[701dd96f]978
[8568341]979    needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
[701dd96f]980  } else {
[8f0c7a46]981    ( *extract_from_ready )( context, &node->Base );
[701dd96f]982
[8568341]983    needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
[701dd96f]984  }
[8568341]985
986  return needs_help;
[701dd96f]987}
988
[48c4a55]989static inline void _Scheduler_SMP_Insert_scheduled_lifo(
[3730a07f]990  Scheduler_Context *context,
[8f0c7a46]991  Scheduler_Node    *node_to_insert
[48c4a55]992)
993{
[3730a07f]994  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
995
[48c4a55]996  _Chain_Insert_ordered_unprotected(
[494c2e3]997    &self->Scheduled,
[8f0c7a46]998    &node_to_insert->Node,
999    _Scheduler_SMP_Insert_priority_lifo_order
[48c4a55]1000  );
1001}
1002
1003static inline void _Scheduler_SMP_Insert_scheduled_fifo(
[3730a07f]1004  Scheduler_Context *context,
[8f0c7a46]1005  Scheduler_Node    *node_to_insert
[48c4a55]1006)
1007{
[3730a07f]1008  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
1009
[48c4a55]1010  _Chain_Insert_ordered_unprotected(
[494c2e3]1011    &self->Scheduled,
[8f0c7a46]1012    &node_to_insert->Node,
1013    _Scheduler_SMP_Insert_priority_fifo_order
[48c4a55]1014  );
1015}
1016
[9d83f58a]1017/** @} */
1018
1019#ifdef __cplusplus
1020}
1021#endif /* __cplusplus */
1022
1023#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */