source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ c0bff5e

Last changed in c0bff5e by Sebastian Huber <sebastian.huber@…> on 05/15/14 at 08:31:22

score: Split SMP scheduler enqueue function

Extract code from _Scheduler_SMP_Enqueue_ordered() and move it to the
new function _Scheduler_SMP_Enqueue_scheduled_ordered() to avoid
untestable execution paths.

Add and use function _Scheduler_SMP_Unblock().

/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue_ordered(),
 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance starts
 * with an idle thread assigned to it.  Let's have a look at an example with two
 * idle threads I and J with priority 5.  We also have blocked threads A, B and
 * C with priorities 1, 2 and 3 respectively.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * now migrated from processor 1 to processor 0 and thread C still executes on
 * processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */

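/*
 * The following function pointer types are the customization points of this
 * SMP scheduler framework.  A concrete scheduler implementation supplies
 * these operations and passes them to the template functions below.
 */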
typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Thread_Control *thread
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Thread_Control *thread_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Thread_Control *thread_to_move
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Priority_Control new_priority
);

typedef void ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Thread_Control *thread_to_enqueue
);

static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_get(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Node_get( thread );
}

static inline void _Scheduler_SMP_Node_initialize(
  Scheduler_SMP_Node *node
)
{
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
}

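/*
 * Transition table indexed as [current state][new state]; an entry is true if
 * and only if the corresponding state change is valid, see the assertion in
 * _Scheduler_SMP_Node_change_state().
 */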
extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_SMP_Node *node,
  Scheduler_SMP_Node_state new_state
)
{
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
  );

  node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_SMP_Context *self,
  const Per_CPU_Control *cpu
)
{
  return cpu->scheduler_context == &self->Base;
}

static inline void _Scheduler_SMP_Update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and only then set the
   * dispatch necessary indicator, so that
   * _Thread_Get_heir_and_make_it_executing() cannot miss an update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}

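/*
 * Makes the scheduled thread the heir of the processor assigned to the victim
 * thread.  If the scheduled thread already executes on a processor owned by
 * this scheduler instance, it becomes the heir of that processor and the
 * previously designated heir moves to the victim's processor instead.
 */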
static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_SMP_Context *self,
  Thread_Control *scheduled,
  Thread_Control *victim
)
{
  Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
  Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
  Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Scheduler_SMP_Node_change_state(
    scheduled_node,
    SCHEDULER_SMP_NODE_SCHEDULED
  );

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
      heir = cpu_of_scheduled->heir;
      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
    } else {
      /* We have to force a migration to our processor set */
      _Assert( scheduled->debug_real_cpu->heir != scheduled );
      heir = scheduled;
    }
  } else {
    heir = scheduled;
  }

  if ( heir != victim ) {
    _Thread_Set_CPU( heir, cpu_of_victim );
    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
  }
}

static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_SMP_Context *self
)
{
  Thread_Control *lowest_scheduled = NULL;
  Chain_Control *scheduled = &self->Scheduled;

  if ( !_Chain_Is_empty( scheduled ) ) {
    lowest_scheduled = (Thread_Control *) _Chain_Last( scheduled );
  }

  return lowest_scheduled;
}

/**
 * @brief Enqueues a thread according to the specified order function.
 *
 * The thread must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread to enqueue.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 * nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 * scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 * of scheduled nodes to the set of ready nodes.
 */
static inline void _Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context *context,
  Thread_Control *thread,
  Chain_Node_order order,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_scheduled_to_ready
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *lowest_scheduled =
    _Scheduler_SMP_Get_lowest_scheduled( self );

  _Assert( lowest_scheduled != NULL );

  /*
   * NOTE: Do not exchange parameters to do the negation of the order check.
   */
  if ( ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
    Scheduler_SMP_Node *lowest_scheduled_node =
      _Scheduler_SMP_Node_get( lowest_scheduled );

    _Scheduler_SMP_Node_change_state(
      lowest_scheduled_node,
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Allocate_processor( self, thread, lowest_scheduled );
    ( *insert_scheduled )( &self->Base, thread );
    ( *move_from_scheduled_to_ready )( &self->Base, lowest_scheduled );
  } else {
    ( *insert_ready )( &self->Base, thread );
  }
}

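/*
 * Illustrative sketch (not part of this header): a concrete scheduler could
 * instantiate the enqueue template as follows, where _My_Insert_ready_fifo()
 * and _My_Move_from_scheduled_to_ready() are hypothetical callbacks supplied
 * by that scheduler, while the order function and the scheduled insert come
 * from the included headers and this file:
 *
 *   static void _My_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Thread_Control *thread
 *   )
 *   {
 *     _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       thread,
 *       _Scheduler_simple_Insert_priority_fifo_order,
 *       _My_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_Move_from_scheduled_to_ready
 *     );
 *   }
 */
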
/**
 * @brief Enqueues a scheduled thread according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread to enqueue.
 * @param[in] order The order function.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 * nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 * scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 * of ready nodes to the set of scheduled nodes.
 */
static inline void _Scheduler_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context *context,
  Thread_Control *thread,
  Chain_Node_order order,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
  Thread_Control *highest_ready = ( *get_highest_ready )( &self->Base );

  _Assert( highest_ready != NULL );

  /*
   * The thread has been extracted from the scheduled chain.  We have to place
   * it now on the scheduled or ready set.
   *
   * NOTE: Do not exchange parameters to do the negation of the order check.
   */
  if ( !( *order )( &thread->Object.Node, &highest_ready->Object.Node ) ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    _Scheduler_SMP_Allocate_processor( self, highest_ready, thread );
    ( *insert_ready )( &self->Base, thread );
    ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
  } else {
    ( *insert_scheduled )( &self->Base, thread );
  }
}

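/*
 * Illustrative sketch (hypothetical _My_*() names): the scheduled variant is
 * bound in the same way, e.g. for a FIFO ordering:
 *
 *   static void _My_Enqueue_scheduled_fifo(
 *     Scheduler_Context *context,
 *     Thread_Control *thread
 *   )
 *   {
 *     _Scheduler_SMP_Enqueue_scheduled_ordered(
 *       context,
 *       thread,
 *       _Scheduler_simple_Insert_priority_fifo_order,
 *       _My_Get_highest_ready,
 *       _My_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_Move_from_ready_to_scheduled
 *     );
 *   }
 */
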
static inline void _Scheduler_SMP_Extract_from_scheduled(
  Thread_Control *thread
)
{
  _Chain_Extract_unprotected( &thread->Object.Node );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context *context,
  Thread_Control *victim,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *highest_ready = ( *get_highest_ready )( &self->Base );

  _Scheduler_SMP_Allocate_processor( self, highest_ready, victim );

  ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] extract_from_ready Function to extract a node from the set of
 * ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 * of ready nodes to the set of scheduled nodes.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;

  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( is_scheduled ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );

    _Scheduler_SMP_Schedule_highest_ready(
      context,
      thread,
      get_highest_ready,
      move_from_ready_to_scheduled
    );
  } else {
    ( *extract_from_ready )( context, thread );
  }
}

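/*
 * Illustrative sketch (hypothetical _My_*() names): a concrete scheduler
 * could bind its block operation like this, assuming the usual
 * _Scheduler_Get_context() accessor from <rtems/score/schedulerimpl.h>:
 *
 *   void _My_Scheduler_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control *thread
 *   )
 *   {
 *     _Scheduler_SMP_Block(
 *       _Scheduler_Get_context( scheduler ),
 *       thread,
 *       _My_Extract_from_ready,
 *       _My_Get_highest_ready,
 *       _My_Move_from_ready_to_scheduled
 *     );
 *   }
 */
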
static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_SMP_Enqueue enqueue_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

  ( *enqueue_fifo )( context, thread );
}

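/*
 * Illustrative sketch: an unblock operation simply reuses the FIFO enqueue
 * instantiation shown above (the hypothetical _My_Enqueue_fifo()), e.g.
 *
 *   _Scheduler_SMP_Unblock( context, thread, _My_Enqueue_fifo );
 */
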
static inline void _Scheduler_SMP_Change_priority(
  Scheduler_Context *context,
  Thread_Control *thread,
  Priority_Control new_priority,
  bool prepend_it,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Update update,
  Scheduler_SMP_Enqueue enqueue_fifo,
  Scheduler_SMP_Enqueue enqueue_lifo,
  Scheduler_SMP_Enqueue enqueue_scheduled_fifo,
  Scheduler_SMP_Enqueue enqueue_scheduled_lifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      ( *enqueue_scheduled_lifo )( context, thread );
    } else {
      ( *enqueue_scheduled_fifo )( context, thread );
    }
  } else {
    ( *extract_from_ready )( context, thread );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      ( *enqueue_lifo )( context, thread );
    } else {
      ( *enqueue_fifo )( context, thread );
    }
  }
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &thread->Object.Node,
    _Scheduler_simple_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &thread->Object.Node,
    _Scheduler_simple_Insert_priority_fifo_order
  );
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */