source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ f39f667a

Last change on this file was f39f667a, checked in by Sebastian Huber <sebastian.huber@…>, on 05/14/14 at 11:50:48

score: Simplify _Thread_Change_priority()

The function to change a thread priority was too complex. Simplify it
with a new scheduler operation. This increases the average-case
performance due to the simplified logic. The interrupt-disabled
critical section is slightly prolonged since the extract, update and
enqueue steps are now executed atomically. This should, however, not
impact the worst-case interrupt latency since, at least for the
Deterministic Priority Scheduler, this sequence can be carried out with
a handful of instructions and no loops.

Add _Scheduler_Change_priority() to replace the sequence of

  • _Thread_Set_transient(),
  • _Scheduler_Extract(),
  • _Scheduler_Enqueue(), and
  • _Scheduler_Enqueue_first().

Delete STATES_TRANSIENT, _States_Is_transient() and
_Thread_Set_transient() since this state is now superfluous.

With this change it is possible to get rid of the
SCHEDULER_SMP_NODE_IN_THE_AIR state. This considerably simplifies the
implementation of the new SMP locking protocols.
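
As an illustrative sketch only (identifiers abbreviated, not verbatim
call sites), a priority change thus reduces from

  /* Before: separate steps guarded by the transient state */
  _Thread_Set_transient( the_thread );
  _Scheduler_Extract( the_thread );
  the_thread->current_priority = new_priority;
  if ( prepend_it ) {
    _Scheduler_Enqueue_first( the_thread );
  } else {
    _Scheduler_Enqueue( the_thread );
  }

to a single atomic operation

  _Scheduler_Change_priority( the_thread, new_priority, prepend_it );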

/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue_ordered(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance starts
 * with an idle thread assigned to it.  Let's have a look at an example with two
 * idle threads I and J with priority 5.  We also have blocked threads A, B and
 * C with priorities 1, 2 and 3 respectively.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * has now migrated from processor 1 to processor 0 and thread C still
 * executes on processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */

typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Thread_Control *thread
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Thread_Control *thread_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Thread_Control *thread_to_move
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node *node,
  Priority_Control new_priority
);

typedef void ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Thread_Control *thread_to_enqueue,
  bool has_processor_allocated
);

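/*
 * The function pointer types above are the customization points of the
 * generic SMP scheduler algorithms below: a concrete scheduler (for example
 * the Deterministic Priority SMP Scheduler) supplies its own ready set
 * representation through these callbacks, while the scheduled set handling
 * is shared.  See the illustration after _Scheduler_SMP_Enqueue_ordered().
 */
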
static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_get(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Node_get( thread );
}

static inline void _Scheduler_SMP_Node_initialize(
  Scheduler_SMP_Node *node
)
{
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
}

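/*
 * Boolean matrix of the allowed node state transitions, indexed by the
 * current state (row) and the requested new state (column); see the state
 * diagram at the top of this file.
 */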
extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_SMP_Node *node,
  Scheduler_SMP_Node_state new_state
)
{
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
  );

  node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_SMP_Context *self,
  const Per_CPU_Control *cpu
)
{
  return cpu->scheduler_context == &self->Base;
}

static inline void _Scheduler_SMP_Update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and then the dispatch
   * necessary indicator so that _Thread_Get_heir_and_make_it_executing()
   * cannot miss an update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_SMP_Context *self,
  Thread_Control *scheduled,
  Thread_Control *victim
)
{
  Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
  Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
  Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Scheduler_SMP_Node_change_state(
    scheduled_node,
    SCHEDULER_SMP_NODE_SCHEDULED
  );

  _Assert( _ISR_Get_level() != 0 );

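  /*
   * Determine the heir of the victim's processor.  If the scheduled thread
   * already executes on a processor owned by this scheduler instance, it
   * stays there and becomes the heir of its own processor; the previous
   * heir of that processor is then re-allocated to the victim's processor
   * below.  Otherwise the scheduled thread itself becomes the heir, which
   * may force a migration into our processor set.
   */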
  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
      heir = cpu_of_scheduled->heir;
      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
    } else {
      /* We have to force a migration to our processor set */
      _Assert( scheduled->debug_real_cpu->heir != scheduled );
      heir = scheduled;
    }
  } else {
    heir = scheduled;
  }

  if ( heir != victim ) {
    _Thread_Set_CPU( heir, cpu_of_victim );
    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
  }
}

static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_SMP_Context *self
)
{
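  /*
   * The scheduled chain is ordered by priority, so its last node is the
   * scheduled thread with the lowest priority.
   */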
  Thread_Control *lowest_scheduled = NULL;
  Chain_Control *scheduled = &self->Scheduled;

  if ( !_Chain_Is_empty( scheduled ) ) {
    lowest_scheduled = (Thread_Control *) _Chain_Last( scheduled );
  }

  return lowest_scheduled;
}

/**
 * @brief Enqueues a thread according to the specified order function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread to enqueue.
 * @param[in] has_processor_allocated Indicates whether the thread currently
 * has a processor allocated.
 * @param[in] order The order function.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 * nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 * scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 * of ready nodes to the set of scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 * of scheduled nodes to the set of ready nodes.
 */
static inline void _Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context *context,
  Thread_Control *thread,
  bool has_processor_allocated,
  Chain_Node_order order,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Insert insert_ready,
  Scheduler_SMP_Insert insert_scheduled,
  Scheduler_SMP_Move move_from_ready_to_scheduled,
  Scheduler_SMP_Move move_from_scheduled_to_ready
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  if ( has_processor_allocated ) {
    Thread_Control *highest_ready = ( *get_highest_ready )( &self->Base );

    _Assert( highest_ready != NULL );

    /*
     * The thread has been extracted from the scheduled chain.  We have to
     * place it now on the scheduled or ready chain.
     *
     * NOTE: Do not exchange parameters to do the negation of the order check.
     */
    if ( !( *order )( &thread->Object.Node, &highest_ready->Object.Node ) ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Allocate_processor( self, highest_ready, thread );
      ( *insert_ready )( &self->Base, thread );
      ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
    } else {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
      ( *insert_scheduled )( &self->Base, thread );
    }
  } else {
    Thread_Control *lowest_scheduled =
      _Scheduler_SMP_Get_lowest_scheduled( self );

    _Assert( lowest_scheduled != NULL );

    if ( ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
      Scheduler_SMP_Node *lowest_scheduled_node =
        _Scheduler_SMP_Node_get( lowest_scheduled );

      _Scheduler_SMP_Node_change_state(
        lowest_scheduled_node,
        SCHEDULER_SMP_NODE_READY
      );
      _Scheduler_SMP_Allocate_processor( self, thread, lowest_scheduled );
      ( *insert_scheduled )( &self->Base, thread );
      ( *move_from_scheduled_to_ready )( &self->Base, lowest_scheduled );
    } else {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      ( *insert_ready )( &self->Base, thread );
    }
  }
}
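
/*
 * Illustration only: a concrete scheduler is expected to wrap
 * _Scheduler_SMP_Enqueue_ordered() with its own callbacks.  The _My_SMP_*
 * helper names below are hypothetical and not part of this file:
 *
 *   static void _My_SMP_Enqueue_fifo(
 *     Scheduler_Context *context,
 *     Thread_Control *thread,
 *     bool has_processor_allocated
 *   )
 *   {
 *     _Scheduler_SMP_Enqueue_ordered(
 *       context,
 *       thread,
 *       has_processor_allocated,
 *       _Scheduler_simple_Insert_priority_fifo_order,
 *       _My_SMP_Get_highest_ready,
 *       _My_SMP_Insert_ready_fifo,
 *       _Scheduler_SMP_Insert_scheduled_fifo,
 *       _My_SMP_Move_from_ready_to_scheduled,
 *       _My_SMP_Move_from_scheduled_to_ready
 *     );
 *   }
 *
 * Since the callbacks are compile-time constants at such call sites, the
 * compiler can inline and specialize the generic algorithm per scheduler.
 */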

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Thread_Control *thread
)
{
  _Chain_Extract_unprotected( &thread->Object.Node );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context *context,
  Thread_Control *victim,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *highest_ready = ( *get_highest_ready )( &self->Base );

  _Scheduler_SMP_Allocate_processor( self, highest_ready, victim );

  ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] extract_from_ready Function to extract a node from the set of
 * ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 * of ready nodes to the set of scheduled nodes.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context *context,
  Thread_Control *thread,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Get_highest_ready get_highest_ready,
  Scheduler_SMP_Move move_from_ready_to_scheduled
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;

  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( is_scheduled ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );

    _Scheduler_SMP_Schedule_highest_ready(
      context,
      thread,
      get_highest_ready,
      move_from_ready_to_scheduled
    );
  } else {
    ( *extract_from_ready )( context, thread );
  }
}

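/**
 * @brief Changes the priority of a thread.
 *
 * The thread is extracted from the scheduled or ready set, the priority of
 * its scheduler node is updated, and the thread is enqueued again.  These
 * extract, update and enqueue steps are carried out as one operation.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] new_priority The new thread priority.
 * @param[in] prepend_it Enqueue the thread in LIFO order if true, otherwise
 * in FIFO order.
 * @param[in] extract_from_ready Function to extract a node from the set of
 * ready nodes.
 * @param[in] update Function to update the priority of a node.
 * @param[in] enqueue_fifo Function to enqueue a node in FIFO order.
 * @param[in] enqueue_lifo Function to enqueue a node in LIFO order.
 */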
static inline void _Scheduler_SMP_Change_priority(
  Scheduler_Context *context,
  Thread_Control *thread,
  Priority_Control new_priority,
  bool prepend_it,
  Scheduler_SMP_Extract extract_from_ready,
  Scheduler_SMP_Update update,
  Scheduler_SMP_Enqueue enqueue_fifo,
  Scheduler_SMP_Enqueue enqueue_lifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
  bool has_processor_allocated = node->state == SCHEDULER_SMP_NODE_SCHEDULED;

  if ( has_processor_allocated ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );
  } else {
    ( *extract_from_ready )( context, thread );
  }

  ( *update )( context, &node->Base, new_priority );

  if ( prepend_it ) {
    ( *enqueue_lifo )( context, thread, has_processor_allocated );
  } else {
    ( *enqueue_fifo )( context, thread, has_processor_allocated );
  }
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &thread->Object.Node,
    _Scheduler_simple_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Thread_Control *thread
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &thread->Object.Node,
    _Scheduler_simple_Insert_priority_fifo_order
  );
}

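/*
 * During system initialization each processor is assigned one idle thread;
 * appending it to the scheduled chain is sufficient here since only idle
 * threads are scheduled at this point.
 */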
static inline void _Scheduler_SMP_Start_idle(
  Scheduler_Context *context,
  Thread_Control *thread,
  Per_CPU_Control *cpu
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  node->state = SCHEDULER_SMP_NODE_SCHEDULED;

  _Thread_Set_CPU( thread, cpu );
  _Chain_Append_unprotected( &self->Scheduled, &thread->Object.Node );
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */