source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ 238629f

Last change on this file since 238629f was 238629f, checked in by Joel Sherrill <joel.sherrill@…>, on 05/19/14 at 20:26:55

Add SMP Priority Scheduler with Affinity

This scheduler attempts to account for thread migrations needed as a
side effect of a thread state, affinity, or priority change operation.

This scheduler has its own allocate_processor handler named
_Scheduler_SMP_Allocate_processor_exact() because
_Scheduler_SMP_Allocate_processor() attempts to prevent an executing
thread from moving off its current CPU without considering affinity.
Without the exact variant, the scheduler makes all the right decisions
and then discards them at the end.
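
A minimal sketch of what the exact variant could look like, built only from
helpers already present in schedulersmpimpl.h (illustrative; the actual
implementation in the affinity scheduler may differ):

  static void _Scheduler_SMP_Allocate_processor_exact(
    Scheduler_SMP_Context *self,
    Thread_Control        *scheduled,
    Thread_Control        *victim
  )
  {
    Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
    Per_CPU_Control *cpu_self = _Per_CPU_Get();

    (void) self;

    /* Honor the scheduling decision exactly: place the scheduled thread on
       the victim's processor even if it currently executes elsewhere. */
    _Scheduler_SMP_Node_change_state(
      _Scheduler_SMP_Node_get( scheduled ),
      SCHEDULER_SMP_NODE_SCHEDULED
    );
    _Thread_Set_CPU( scheduled, cpu_of_victim );
    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, scheduled );
  }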

==Side Effects of Adding This Scheduler==

Added a Thread_Control * parameter to the Scheduler_SMP_Get_highest_ready type
so methods looking for the highest ready thread can filter by the processor
on which the blocking thread resides.  This allows affinity to be considered.
Simple Priority SMP and Priority SMP ignore this parameter.
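
For illustration, an affinity-aware get_highest_ready helper could use the
new parameter roughly as follows.  _Ready_queue_first(), _Ready_queue_next()
and _Thread_Has_affinity_for() are hypothetical placeholders for
scheduler-specific ready-set iteration and affinity tests, not RTEMS API:

  static Thread_Control *_Affinity_SMP_Get_highest_ready(
    Scheduler_Context *context,
    Thread_Control    *blocking
  )
  {
    /* Index of the processor being vacated by the blocking thread. */
    uint32_t cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( blocking ) );
    Thread_Control *thread;

    /* Return the first thread, in priority order, which may run there. */
    for (
      thread = _Ready_queue_first( context );
      thread != NULL;
      thread = _Ready_queue_next( context, thread )
    ) {
      if ( _Thread_Has_affinity_for( thread, cpu_index ) ) {
        return thread;
      }
    }

    return NULL;
  }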

+ Added get_lowest_scheduled argument to _Scheduler_SMP_Enqueue_ordered().

+ Added allocate_processor argument to the following methods:

  • _Scheduler_SMP_Block()
  • _Scheduler_SMP_Enqueue_scheduled_ordered()
  • _Scheduler_SMP_Schedule_highest_ready()

+ schedulerprioritysmpimpl.h is a new file with prototypes for methods which
were formerly static in schedulerprioritysmp.c but now need to be public to
be shared with this scheduler.
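
For example, declarations of roughly this shape, matching the
Scheduler_SMP_Insert and Scheduler_SMP_Move types in the file below (the
names are assumed from the Priority SMP scheduler; the exact prototypes may
differ):

  void _Scheduler_priority_SMP_Insert_ready_fifo(
    Scheduler_Context *context,
    Thread_Control    *thread
  );

  void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
    Scheduler_Context *context,
    Thread_Control    *scheduled_to_ready
  );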

NOTE:

_Scheduler_SMP_Get_lowest_ready() appears to have a path which would
allow it to return NULL.  Previously, _Scheduler_SMP_Enqueue_ordered()
would have asserted on it.  If it cannot return NULL,
_Scheduler_SMP_Get_lowest_ready() should have an assertion.

/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue_ordered(),
 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance
 * starts with an idle thread assigned to it.  Let's have a look at an example
 * with two idle threads I and J with priority 5.  We also have blocked
 * threads A, B and C with priorities 1, 2 and 3 respectively.  The scheduler
 * nodes are ordered with respect to the thread priority from left to right in
 * the below diagrams.  The highest priority node (lowest priority number) is
 * the leftmost node.  Since the processor assignment is independent of the
 * thread priority, the processor indices may move from one state to the
 * other.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * now executes on processor 0, the processor vacated by thread B, while
 * thread C still executes on processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */

typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Thread_Control    *blocking
);

typedef Thread_Control *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Chain_Node_order   order
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Thread_Control    *thread
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Thread_Control    *thread_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Thread_Control    *thread_to_move
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   new_priority
);

typedef void ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Thread_Control    *thread_to_enqueue
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_SMP_Context *self,
  Thread_Control        *scheduled,
  Thread_Control        *victim
);

static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_get(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Node_get( thread );
}

static inline void _Scheduler_SMP_Node_initialize(
  Scheduler_SMP_Node *node
)
{
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
}

extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_SMP_Node *node,
  Scheduler_SMP_Node_state new_state
)
{
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
  );

  node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_SMP_Context *self,
  const Per_CPU_Control *cpu
)
{
  return cpu->scheduler_context == &self->Base;
}

static inline void _Scheduler_SMP_Update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and then the dispatch
   * necessary indicator so that _Thread_Get_heir_and_make_it_executing()
   * cannot miss an update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}

static void _Scheduler_SMP_Allocate_processor(
  Scheduler_SMP_Context *self,
  Thread_Control *scheduled,
  Thread_Control *victim
)
{
  Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
  Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
  Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Scheduler_SMP_Node_change_state(
    scheduled_node,
    SCHEDULER_SMP_NODE_SCHEDULED
  );

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
      heir = cpu_of_scheduled->heir;
      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
    } else {
      /* We have to force a migration to our processor set */
      _Assert( scheduled->debug_real_cpu->heir != scheduled );
      heir = scheduled;
    }
  } else {
    heir = scheduled;
  }

  if ( heir != victim ) {
    _Thread_Set_CPU( heir, cpu_of_victim );
    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
  }
}

static Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Thread_Control    *filter,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *lowest_scheduled = NULL;
  Chain_Control *scheduled = &self->Scheduled;

  if ( !_Chain_Is_empty( scheduled ) ) {
    lowest_scheduled = (Thread_Control *) _Chain_Last( scheduled );
  }

  /*
   * _Scheduler_SMP_Enqueue_ordered() assumes that get_lowest_scheduled
   * helpers may return NULL.  This particular helper never should.
   */
  _Assert( lowest_scheduled != NULL );

  return lowest_scheduled;
}

/**
 * @brief Enqueues a thread according to the specified order function.
 *
 * The thread must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread to enqueue.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param[in] get_lowest_scheduled Function to select the thread from the
 *   scheduled nodes to replace.  It may not be possible to find one.
 * @param[in] allocate_processor Function to allocate a processor to a thread
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *lowest_scheduled =
    ( *get_lowest_scheduled )( context, thread, order );

  /*
   *  get_lowest_scheduled can return NULL if no scheduled thread should be
   *  removed from its processor based on the selection criteria.  For
   *  example, this can occur when the thread being enqueued has affinity
   *  only for cores which are running higher priority threads.  A low
   *  priority thread with affinity can only consider the threads which are
   *  on the cores it has affinity for.
   *
   *  The get_lowest_scheduled helper should assert on not returning NULL
   *  if that is not possible for that scheduler.
   */

  if ( lowest_scheduled &&
       ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
    Scheduler_SMP_Node *lowest_scheduled_node =
      _Scheduler_SMP_Node_get( lowest_scheduled );

    _Scheduler_SMP_Node_change_state(
      lowest_scheduled_node,
      SCHEDULER_SMP_NODE_READY
    );
    ( *allocate_processor )( self, thread, lowest_scheduled );
    ( *insert_scheduled )( &self->Base, thread );
    ( *move_from_scheduled_to_ready )( &self->Base, lowest_scheduled );
  } else {
    ( *insert_ready )( &self->Base, thread );
  }
}
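
/*
 * Usage sketch (illustrative): a concrete SMP scheduler typically fixes the
 * helper arguments of _Scheduler_SMP_Enqueue_ordered() in a thin wrapper.
 * The _My_scheduler_* names are hypothetical placeholders for
 * scheduler-specific ready-set helpers; the remaining arguments are helpers
 * defined in this file and in schedulersimpleimpl.h.
 *
 *   static void _My_scheduler_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Thread_Control    *thread
 *   )
 *   {
 *     _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       thread,
 *       _Scheduler_simple_Insert_priority_fifo_order,
 *       _My_scheduler_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_scheduler_Move_from_scheduled_to_ready,
 *       _Scheduler_SMP_Get_lowest_scheduled,
 *       _Scheduler_SMP_Allocate_processor
 *     );
 *   }
 */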

/**
 * @brief Enqueues a scheduled thread according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread to enqueue.
 * @param[in] order The order function.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a thread
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Chain_Node_order                  order,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
  Thread_Control *highest_ready =
    ( *get_highest_ready )( &self->Base, thread );

  _Assert( highest_ready != NULL );

  /*
   * The thread has been extracted from the scheduled chain.  We have to place
   * it now on the scheduled or ready set.
   */
  if ( ( *order )( &thread->Object.Node, &highest_ready->Object.Node ) ) {
    ( *insert_scheduled )( &self->Base, thread );
  } else {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    ( *allocate_processor )( self, highest_ready, thread );
    ( *insert_ready )( &self->Base, thread );
    ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
  }
}

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Thread_Control *thread
)
{
  _Chain_Extract_unprotected( &thread->Object.Node );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context *context,
  Thread_Control *victim,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *highest_ready =
    ( *get_highest_ready )( &self->Base, victim );

  ( *allocate_processor )( self, highest_ready, victim );

  ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the
 *   set of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a thread
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor allocate_processor
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;

  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( is_scheduled ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );

    _Scheduler_SMP_Schedule_highest_ready(
      context,
      thread,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
  } else {
    ( *extract_from_ready )( context, thread );
  }
}
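
/*
 * Usage sketch (illustrative): the block operation is instantiated the same
 * way.  Again, the _My_scheduler_* helpers are hypothetical placeholders for
 * scheduler-specific ready-set operations.
 *
 *   static void _My_scheduler_Block(
 *     Scheduler_Context *context,
 *     Thread_Control    *thread
 *   )
 *   {
 *     _Scheduler_SMP_Block(
 *       context,
 *       thread,
 *       _My_scheduler_Extract_from_ready,
 *       _My_scheduler_Get_highest_ready,
 *       _My_scheduler_Move_from_ready_to_scheduled,
 *       _Scheduler_SMP_Allocate_processor
 *     );
 *   }
 */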

static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_SMP_Enqueue enqueue_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

  ( *enqueue_fifo )( context, thread );
}

static inline void _Scheduler_SMP_Change_priority(
  Scheduler_Context *context,
  Thread_Control *thread,
  Priority_Control new_priority,
  bool prepend_it,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Update update,
  Scheduler_SMP_Enqueue enqueue_fifo,
  Scheduler_SMP_Enqueue enqueue_lifo,
  Scheduler_SMP_Enqueue enqueue_scheduled_fifo,
  Scheduler_SMP_Enqueue enqueue_scheduled_lifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      ( *enqueue_scheduled_lifo )( context, thread );
    } else {
      ( *enqueue_scheduled_fifo )( context, thread );
    }
  } else {
    ( *extract_from_ready )( context, thread );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      ( *enqueue_lifo )( context, thread );
    } else {
      ( *enqueue_fifo )( context, thread );
    }
  }
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &thread->Object.Node,
    _Scheduler_simple_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &thread->Object.Node,
    _Scheduler_simple_Insert_priority_fifo_order
  );
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */