source: rtems/cpukit/score/include/rtems/score/schedulersmpimpl.h @ 701dd96f

Last change on this file since 701dd96f was 701dd96f, checked in by Sebastian Huber <sebastian.huber@…>, on 06/12/14 at 12:37:57

score: PR2181: Add _Thread_Yield()

The _Scheduler_Yield() operation was called by the executing thread with
thread dispatching disabled and interrupts enabled.  However,
rtems_task_suspend() is explicitly allowed in ISRs:

http://rtems.org/onlinedocs/doc-current/share/rtems/html/c_user/Interrupt-Manager-Directives-Allowed-from-an-ISR.html#Interrupt-Manager-Directives-Allowed-from-an-ISR

Unlike the other scheduler operations, the locking was performed inside
the operation.  This led to the following race condition.  Suppose an
ISR suspends the executing thread right before the yield scheduler
operation.  The executing thread is then no longer in the set of ready
threads.  The typical scheduler operations did not check the thread
state, so the yield operation would extract the thread again and
enqueue it.  This corrupted the scheduler data structures.

Add _Thread_Yield() and do the scheduler yield operation with interrupts
disabled. This has a negligible effect on the interrupt latency.
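
The shape of the fix can be sketched as follows (a minimal sketch based on the description above; names such as _ISR_Disable() and _States_Is_ready() follow the usual score conventions, and the actual _Thread_Yield() implementation may differ in details):

void _Thread_Yield( Thread_Control *executing )
{
  ISR_Level level;

  _ISR_Disable( level );

  /* An ISR may have suspended the executing thread in the meantime, so the
     scheduler yield operation is only performed if the thread is still
     ready.  This closes the race condition described above. */
  if ( _States_Is_ready( executing->current_state ) ) {
    _Scheduler_Yield( executing );
  }

  _ISR_Enable( level );
}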

/**
 * @file
 *
 * @brief SMP Scheduler Implementation
 *
 * @ingroup ScoreSchedulerSMP
 */

/*
 * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup ScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via the basic operations
 * - _Scheduler_SMP_Enqueue_ordered(),
 * - _Scheduler_SMP_Enqueue_scheduled_ordered(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
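 *
 * A concrete SMP scheduler composes these basic operations with its own
 * helper functions.  As a purely hypothetical sketch (the _My_scheduler_*
 * helpers are placeholders and not part of this file), an enqueue operation
 * of a scheduler back end could look like this:
 * @code
 * static void _My_scheduler_Enqueue_fifo(
 *   Scheduler_Context *context,
 *   Thread_Control    *thread
 * )
 * {
 *   _Scheduler_SMP_Enqueue_ordered(
 *     context,
 *     thread,
 *     _My_scheduler_Order_fifo,
 *     _My_scheduler_Insert_ready_fifo,
 *     _Scheduler_SMP_Insert_scheduled_fifo,
 *     _My_scheduler_Move_from_scheduled_to_ready,
 *     _Scheduler_SMP_Get_lowest_scheduled,
 *     _Scheduler_SMP_Allocate_processor
 *   );
 * }
 * @endcode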
 *
 * During system initialization each processor of the scheduler instance
 * starts with an idle thread assigned to it.  Let's have a look at an example
 * with two idle threads I and J with priority 5.  We also have blocked
 * threads A, B and C with priorities 1, 2 and 3 respectively.  The scheduler
 * nodes are ordered with respect to the thread priority from left to right in
 * the diagrams below.  The highest priority node (lowest priority number) is
 * the leftmost node.  Since the processor assignment is independent of the
 * thread priority, the processor assignments may change from one diagram to
 * the next.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * now migrated from processor 1 to processor 0 and thread C still executes on
 * processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
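 *
 * Expressed with hypothetical wrapper operations of a concrete scheduler (a
 * sketch only; the _My_scheduler_* wrappers are placeholders, and in a real
 * system these transitions are driven through the thread and scheduler
 * framework), the sequence above is:
 * @code
 * _My_scheduler_Unblock( context, a );      // A: BLOCKED -> SCHEDULED
 * _My_scheduler_Unblock( context, c );      // C: BLOCKED -> SCHEDULED
 * _My_scheduler_Unblock( context, b );      // B: BLOCKED -> SCHEDULED
 * _My_scheduler_Change_priority( context, a, 4 );  // A: SCHEDULED -> READY
 * _My_scheduler_Block( context, b );        // B: SCHEDULED -> BLOCKED
 * @endcode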
 *
 * @{
 */

typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Thread_Control    *blocking
);

typedef Thread_Control *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Chain_Node_order   order
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Thread_Control    *thread
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Thread_Control    *thread_to_insert
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Thread_Control    *thread_to_move
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Priority_Control   new_priority
);

typedef void ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Thread_Control    *thread_to_enqueue
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_SMP_Context *self,
  Thread_Control        *scheduled,
  Thread_Control        *victim
);

static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_get(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Scheduler_Node_get( thread );
}

static inline void _Scheduler_SMP_Node_initialize(
  Scheduler_SMP_Node *node
)
{
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
}

extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_SMP_Node      *node,
  Scheduler_SMP_Node_state new_state
)
{
  _Assert(
    _Scheduler_SMP_Node_valid_state_changes[ node->state ][ new_state ]
  );

  node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_SMP_Context *self,
  const Per_CPU_Control       *cpu
)
{
  return cpu->scheduler_context == &self->Base;
}

static inline void _Scheduler_SMP_Update_heir(
  Per_CPU_Control *cpu_self,
  Per_CPU_Control *cpu_for_heir,
  Thread_Control  *heir
)
{
  cpu_for_heir->heir = heir;

  /*
   * It is critical that we first update the heir and then the dispatch
   * necessary indicator so that _Thread_Get_heir_and_make_it_executing()
   * cannot miss an update.
   */
  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );

  /*
   * Only update the dispatch necessary indicator if not already set to
   * avoid superfluous inter-processor interrupts.
   */
  if ( !cpu_for_heir->dispatch_necessary ) {
    cpu_for_heir->dispatch_necessary = true;

    if ( cpu_for_heir != cpu_self ) {
      _Per_CPU_Send_interrupt( cpu_for_heir );
    }
  }
}

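/**
 * @brief Allocates the processor of the victim thread to the scheduled
 * thread and updates the heir of that processor if necessary.
 */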
static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_SMP_Context *self,
  Thread_Control        *scheduled,
  Thread_Control        *victim
)
{
  Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
  Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
  Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Scheduler_SMP_Node_change_state(
    scheduled_node,
    SCHEDULER_SMP_NODE_SCHEDULED
  );

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
      heir = cpu_of_scheduled->heir;
      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
    } else {
      /* We have to force a migration to our processor set */
      _Assert( scheduled->debug_real_cpu->heir != scheduled );
      heir = scheduled;
    }
  } else {
    heir = scheduled;
  }

  if ( heir != victim ) {
    _Thread_Set_CPU( heir, cpu_of_victim );
    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
  }
}

static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Thread_Control    *filter,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *lowest_scheduled = NULL;
  Chain_Control *scheduled = &self->Scheduled;

  if ( !_Chain_Is_empty( scheduled ) ) {
    lowest_scheduled = (Thread_Control *) _Chain_Last( scheduled );
  }

  /*
   * _Scheduler_SMP_Enqueue_ordered() assumes that get_lowest_scheduled
   * helpers may return NULL, but this one never should: each processor of
   * the scheduler instance always has at least its idle thread in the set
   * of scheduled threads.
   */
  _Assert( lowest_scheduled != NULL );

  return lowest_scheduled;
}

/**
 * @brief Enqueues a thread according to the specified order function.
 *
 * The thread must not be in the scheduled state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread to enqueue.
 * @param[in] order The order function.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param[in] get_lowest_scheduled Function to select the thread from the
 *   scheduled nodes to replace.  It may not be possible to find one; in this
 *   case a pointer must be returned so that the order function returns false
 *   when this pointer is passed as its second argument.
 * @param[in] allocate_processor Function to allocate a processor to a thread
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Enqueue_ordered(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *lowest_scheduled =
    ( *get_lowest_scheduled )( context, thread, order );

  if ( ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
    Scheduler_SMP_Node *lowest_scheduled_node =
      _Scheduler_SMP_Node_get( lowest_scheduled );

    _Scheduler_SMP_Node_change_state(
      lowest_scheduled_node,
      SCHEDULER_SMP_NODE_READY
    );
    ( *allocate_processor )( self, thread, lowest_scheduled );
    ( *insert_scheduled )( &self->Base, thread );
    ( *move_from_scheduled_to_ready )( &self->Base, lowest_scheduled );
  } else {
    ( *insert_ready )( &self->Base, thread );
  }
}

/**
 * @brief Enqueues a scheduled thread according to the specified order
 * function.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread to enqueue.
 * @param[in] order The order function.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param[in] insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a thread
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Enqueue_scheduled_ordered(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Chain_Node_order                  order,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
  Thread_Control *highest_ready =
    ( *get_highest_ready )( &self->Base, thread );

  _Assert( highest_ready != NULL );

  /*
   * The thread has been extracted from the scheduled chain.  We now have to
   * place it on the scheduled or ready set.
   */
  if ( ( *order )( &thread->Object.Node, &highest_ready->Object.Node ) ) {
    ( *insert_scheduled )( &self->Base, thread );
  } else {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    ( *allocate_processor )( self, highest_ready, thread );
    ( *insert_ready )( &self->Base, thread );
    ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
  }
}

static inline void _Scheduler_SMP_Extract_from_scheduled(
  Thread_Control *thread
)
{
  _Chain_Extract_unprotected( &thread->Object.Node );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Thread_Control                   *victim,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *highest_ready =
    ( *get_highest_ready )( &self->Base, victim );

  ( *allocate_processor )( self, highest_ready, victim );

  ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
}

/**
 * @brief Blocks a thread.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread of the scheduling operation.
 * @param[in] extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param[in] get_highest_ready Function to get the highest ready node.
 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param[in] allocate_processor Function to allocate a processor to a thread
 *   based on the rules of the scheduler.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;

  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( is_scheduled ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );

    _Scheduler_SMP_Schedule_highest_ready(
      context,
      thread,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
  } else {
    ( *extract_from_ready )( context, thread );
  }
}

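/**
 * @brief Unblocks a thread by changing its scheduler node to the ready state
 * and enqueueing it in FIFO order.
 */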
static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_SMP_Enqueue  enqueue_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

  ( *enqueue_fifo )( context, thread );
}

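/**
 * @brief Changes the priority of a thread by extracting it, updating its
 * scheduler node with the new priority, and enqueueing it again.
 */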
static inline void _Scheduler_SMP_Change_priority(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Priority_Control       new_priority,
  bool                   prepend_it,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Update   update,
  Scheduler_SMP_Enqueue  enqueue_fifo,
  Scheduler_SMP_Enqueue  enqueue_lifo,
  Scheduler_SMP_Enqueue  enqueue_scheduled_fifo,
  Scheduler_SMP_Enqueue  enqueue_scheduled_lifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      ( *enqueue_scheduled_lifo )( context, thread );
    } else {
      ( *enqueue_scheduled_fifo )( context, thread );
    }
  } else {
    ( *extract_from_ready )( context, thread );

    ( *update )( context, &node->Base, new_priority );

    if ( prepend_it ) {
      ( *enqueue_lifo )( context, thread );
    } else {
      ( *enqueue_fifo )( context, thread );
    }
  }
}

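/**
 * @brief Yields the processor by extracting the thread and enqueueing it
 * again in FIFO order, so that other threads of the same priority get a
 * chance to execute.
 */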
static inline void _Scheduler_SMP_Yield(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue_fifo,
  Scheduler_SMP_Enqueue  enqueue_scheduled_fifo
)
{
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( thread );

    ( *enqueue_scheduled_fifo )( context, thread );
  } else {
    ( *extract_from_ready )( context, thread );

    ( *enqueue_fifo )( context, thread );
  }
}

static inline void _Scheduler_SMP_Insert_scheduled_lifo(
  Scheduler_Context *context,
  Thread_Control    *thread
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &thread->Object.Node,
    _Scheduler_simple_Insert_priority_lifo_order
  );
}

static inline void _Scheduler_SMP_Insert_scheduled_fifo(
  Scheduler_Context *context,
  Thread_Control    *thread
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &thread->Object.Node,
    _Scheduler_simple_Insert_priority_fifo_order
  );
}

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */