source: rtems/cpukit/include/rtems/score/schedulersmpimpl.h @ 7b85efb8

Last change on this file was commit 7b85efb8, checked in by Joel Sherrill <joel@…>, on 02/16/22 at 21:16:11

cpukit/include/rtems/score/[s-z]*.h: Change license to BSD-2

Updates #3053.

/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreSchedulerSMP
 *
 * @brief This header file provides interfaces of the
 *   @ref RTEMSScoreSchedulerSMP which are only used by the implementation.
 */

/*
 * Copyright (c) 2013, 2021 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>
#include <rtems/bspIo.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @addtogroup RTEMSScoreSchedulerSMP
 *
 * The scheduler nodes can be in three states
 * - @ref SCHEDULER_SMP_NODE_BLOCKED,
 * - @ref SCHEDULER_SMP_NODE_SCHEDULED, and
 * - @ref SCHEDULER_SMP_NODE_READY.
 *
 * State transitions are triggered via basic operations
 * - _Scheduler_SMP_Enqueue(),
 * - _Scheduler_SMP_Enqueue_scheduled(), and
 * - _Scheduler_SMP_Block().
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *
 *   bs [label="BLOCKED"];
 *   ss [label="SCHEDULED", fillcolor="green"];
 *   rs [label="READY", fillcolor="red"];
 *
 *   edge [label="enqueue"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   bs -> ss;
 *
 *   edge [fontcolor="red", color="red"];
 *
 *   bs -> rs;
 *
 *   edge [label="enqueue other"];
 *
 *   ss -> rs;
 *
 *   edge [label="block"];
 *   edge [fontcolor="black", color="black"];
 *
 *   ss -> bs;
 *   rs -> bs;
 *
 *   edge [label="block other"];
 *   edge [fontcolor="darkgreen", color="darkgreen"];
 *
 *   rs -> ss;
 * }
 * @enddot
 *
 * During system initialization each processor of the scheduler instance
 * starts with an idle thread assigned to it.  Let's have a look at an example
 * with two idle threads I and J with priority 5.  We also have blocked
 * threads A, B and C with priorities 1, 2 and 3 respectively.  The scheduler
 * nodes are ordered with respect to the thread priority from left to right in
 * the diagrams below.  The highest priority node (lowest priority number) is
 * the leftmost node.  Since the processor assignment is independent of the
 * thread priority, the processor indices may move from one diagram to the
 * next.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="green"];
 *     a [label="A (1)"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   j -> p1;
 * }
 * @enddot
 *
 * Let's start A.  For this, an enqueue operation is performed.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     i [label="I (5)", fillcolor="green"];
 *     j [label="J (5)", fillcolor="red"];
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)"];
 *     c [label="C (3)"];
 *     a -> i;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   i -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start C.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     a -> c;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   c -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's start B.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     a [label="A (1)", fillcolor="green"];
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     a -> b;
 *     c -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   a -> p1;
 * }
 * @enddot
 *
 * Let's change the priority of thread A to 4.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     b [label="B (2)", fillcolor="green"];
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="red"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b -> c;
 *     a -> i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   b -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * Now perform a blocking operation with thread B.  Please note that thread A
 * is scheduled again and executes now on processor 0, while thread C still
 * executes on processor 1.
 *
 * @dot
 * digraph {
 *   node [style="filled"];
 *   edge [dir="none"];
 *
 *   subgraph {
 *     rank = same;
 *
 *     c [label="C (3)", fillcolor="green"];
 *     a [label="A (4)", fillcolor="green"];
 *     i [label="I (5)", fillcolor="red"];
 *     j [label="J (5)", fillcolor="red"];
 *     b [label="B (2)"];
 *     c -> a;
 *     i -> j;
 *   }
 *
 *   subgraph {
 *     rank = same;
 *
 *     p0 [label="PROCESSOR 0", shape="box"];
 *     p1 [label="PROCESSOR 1", shape="box"];
 *   }
 *
 *   a -> p0;
 *   c -> p1;
 * }
 * @enddot
 *
 * @{
 */

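/*
 * Illustrative sketch (an editorial addition, not upstream RTEMS code): the
 * three node states above mapped to printable names, e.g. for ad hoc tracing
 * with printk() from <rtems/bspIo.h>.  The function name is an assumption.
 */
static inline const char *_Scheduler_SMP_Example_state_name(
  Scheduler_SMP_Node_state state
)
{
  switch ( state ) {
    case SCHEDULER_SMP_NODE_BLOCKED:
      return "BLOCKED";
    case SCHEDULER_SMP_NODE_SCHEDULED:
      return "SCHEDULED";
    default:
      /* The remaining state is SCHEDULER_SMP_NODE_READY. */
      return "READY";
  }
}
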
typedef bool ( *Scheduler_SMP_Has_ready )(
  Scheduler_Context *context
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_ready )(
  Scheduler_Context *context
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   insert_priority
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

typedef bool ( *Scheduler_SMP_Ask_for_help )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Scheduler_Node    *node
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

typedef void ( *Scheduler_SMP_Set_affinity )(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  void              *arg
);

typedef bool ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

typedef void ( *Scheduler_SMP_Enqueue_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
);

typedef void ( *Scheduler_SMP_Register_idle )(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
);

/**
 * @brief Does nothing.
 *
 * @param context This parameter is unused.
 * @param idle This parameter is unused.
 * @param cpu This parameter is unused.
 */
static inline void _Scheduler_SMP_Do_nothing_register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
)
{
  (void) context;
  (void) idle;
  (void) cpu;
}

/**
 * @brief Checks if the priority to insert is less than or equal to the
 *   priority of the next chain node.
 *
 * @param key is the priority to insert.
 *
 * @param to_insert is the chain node to insert.
 *
 * @param next is the chain node to compare the priority of.
 *
 * @retval true The priority to insert is less than or equal to the priority
 *   of @a next.
 * @retval false The priority to insert is greater than the priority of
 *   @a next.
 */
static inline bool _Scheduler_SMP_Priority_less_equal(
  const void       *key,
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Priority_Control   *priority_to_insert;
  const Scheduler_SMP_Node *node_next;

  (void) to_insert;
  priority_to_insert = (const Priority_Control *) key;
  node_next = (const Scheduler_SMP_Node *) next;

  return *priority_to_insert <= node_next->priority;
}

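/*
 * Illustrative usage sketch (an editorial addition, not upstream RTEMS code):
 * the order function above is meant to be handed to
 * _Chain_Insert_ordered_unprotected() together with the address of the insert
 * priority as the key, as done by _Scheduler_SMP_Insert_scheduled() further
 * below.  _Example_insert_ordered and its chain parameter are hypothetical.
 */
static inline void _Example_insert_ordered(
  Chain_Control    *chain,
  Scheduler_Node   *node,
  Priority_Control  insert_priority
)
{
  /* Walks the chain and inserts the node before the first node with a
     priority greater than the insert priority. */
  _Chain_Insert_ordered_unprotected(
    chain,
    &node->Node.Chain,
    &insert_priority,
    _Scheduler_SMP_Priority_less_equal
  );
}
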
/**
 * @brief Gets the scheduler SMP context.
 *
 * @param context The context to cast to Scheduler_SMP_Context *.
 *
 * @return @a context cast to Scheduler_SMP_Context *.
 */
static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

/**
 * @brief Initializes the scheduler SMP context.
 *
 * @param[out] self The context to initialize.
 */
static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
}

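/*
 * Illustrative sketch (an editorial addition, not upstream RTEMS code): a
 * back-end's context-initialization path typically downcasts the generic
 * context and initializes the common SMP part first;
 * _Example_initialize_context is a hypothetical name.
 */
static inline void _Example_initialize_context( Scheduler_Context *context )
{
  Scheduler_SMP_Context *self;

  self = _Scheduler_SMP_Get_self( context );

  /* Sets up the empty chain of scheduled nodes; back-end specific ready
     queues would be initialized afterwards. */
  _Scheduler_SMP_Initialize( self );
}
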
/**
 * @brief Gets the scheduler SMP node of the thread.
 *
 * @param thread The thread to get the SMP node of.
 *
 * @return The scheduler SMP node of @a thread.
 */
static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

/**
 * @brief Gets the scheduler SMP node of the thread.
 *
 * @param thread The thread to get the SMP node of.
 *
 * @return The scheduler SMP node of @a thread.
 */
static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

/**
 * @brief Gets the scheduler SMP node.
 *
 * @param node The node to cast to Scheduler_SMP_Node *.
 *
 * @return @a node cast to Scheduler_SMP_Node *.
 */
static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

/**
 * @brief Gets the state of the node.
 *
 * @param node The node to get the state of.
 *
 * @return The state of @a node.
 */
static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->state;
}

/**
 * @brief Gets the priority of the node.
 *
 * @param node The node to get the priority of.
 *
 * @return The priority of @a node.
 */
static inline Priority_Control _Scheduler_SMP_Node_priority(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->priority;
}

/**
 * @brief Initializes the scheduler SMP node.
 *
 * @param scheduler The scheduler instance.
 * @param[out] node The node to initialize.
 * @param thread The thread of the scheduler SMP node.
 * @param priority The priority to initialize @a node with.
 */
static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node      *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}

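/*
 * Illustrative sketch (an editorial addition, not upstream RTEMS code): a
 * back-end's node-initialize operation would typically downcast the generic
 * node and forward to the helper above; _Example_node_initialize is a
 * hypothetical name.
 */
static inline void _Example_node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  Scheduler_SMP_Node *smp_node;

  smp_node = _Scheduler_SMP_Node_downcast( node );

  /* New nodes start in the blocked state with the given priority. */
  _Scheduler_SMP_Node_initialize( scheduler, smp_node, thread, priority );
}
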
/**
 * @brief Updates the priority of the node to the new priority.
 *
 * @param[out] node The node to update the priority of.
 * @param new_priority The new priority for @a node.
 */
static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

/**
 * @brief Changes the state of the node to the given state.
 *
 * @param[out] node The node to change the state of.
 * @param new_state The new state for @a node.
 */
static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_Node           *node,
  Scheduler_SMP_Node_state  new_state
)
{
  Scheduler_SMP_Node *the_node;

  the_node = _Scheduler_SMP_Node_downcast( node );
  the_node->state = new_state;
}

/**
 * @brief Checks if the processor is owned by the given context.
 *
 * @param context The context to check whether @a cpu is owned by it.
 * @param cpu The cpu to check whether it is owned by @a context.
 *
 * @retval true @a cpu is owned by @a context.
 * @retval false @a cpu is not owned by @a context.
 */
static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->Scheduler.context == context;
}

/**
 * @brief Removes the thread's ask for help request from the processor.
 *
 * The caller must be the owner of the thread's scheduler lock.
 *
 * @param[in, out] thread is the thread of the ask for help request.
 *
 * @param[in, out] cpu is the processor from which the ask for help request
 *   should be removed.
 */
void _Scheduler_SMP_Remove_ask_for_help_from_processor(
  Thread_Control  *thread,
  Per_CPU_Control *cpu
);

/**
 * @brief Cancels the thread's ask for help request.
 *
 * The caller must be the owner of the thread's scheduler lock.
 *
 * @param[in, out] thread is the thread of the ask for help request.
 */
static inline void _Scheduler_SMP_Cancel_ask_for_help( Thread_Control *thread )
{
  Per_CPU_Control *cpu;

  _Assert( _ISR_lock_Is_owner( &thread->Scheduler.Lock ) );
  cpu = thread->Scheduler.ask_for_help_cpu;

  if ( RTEMS_PREDICT_FALSE( cpu != NULL ) ) {
    _Scheduler_SMP_Remove_ask_for_help_from_processor( thread, cpu );
  }
}

/**
 * @brief Requests to ask for help for the thread.
 *
 * The actual ask for help operations are carried out during
 * _Thread_Do_dispatch() on the current processor.
 *
 * An alternative approach would be to carry out the requests on a processor
 * related to the thread.  This could reduce the overhead for the preempting
 * thread a bit, however, there are at least two problems with this approach.
 * Firstly, we have to figure out what is a processor related to the thread.
 * Secondly, we may need an inter-processor interrupt.
 *
 * @param[in, out] thread is the thread in need for help.
 */
static inline void _Scheduler_SMP_Request_ask_for_help( Thread_Control *thread )
{
  ISR_lock_Context lock_context;
  Per_CPU_Control *cpu_self;

  cpu_self = _Per_CPU_Get();

  _Assert( thread->Scheduler.ask_for_help_cpu == NULL );
  thread->Scheduler.ask_for_help_cpu = cpu_self;
  cpu_self->dispatch_necessary = true;

  _Per_CPU_Acquire( cpu_self, &lock_context );
  _Chain_Append_unprotected(
    &cpu_self->Threads_in_need_for_help,
    &thread->Scheduler.Help_node
  );
  _Per_CPU_Release( cpu_self, &lock_context );
}

/**
 * @brief This enumeration defines what a scheduler should do with a node which
 * could be scheduled.
 */
typedef enum {
  SCHEDULER_SMP_DO_SCHEDULE,
  SCHEDULER_SMP_DO_NOT_SCHEDULE
} Scheduler_SMP_Action;

/**
 * @brief Tries to schedule the scheduler node.
 *
 * When an SMP scheduler needs to schedule a node, it shall use this function
 * to determine what it shall do with the node.
 *
 * This function uses the state of the node and the scheduler state of the
 * owner thread to determine what shall be done.  Each scheduler maintains its
 * nodes independent of other schedulers.  This function ensures that a thread
 * is scheduled by at most one scheduler.  If a node requires an executing
 * thread due to some locking protocol and the owner thread is already
 * scheduled by another scheduler, then an idle thread will be attached to the
 * node.
 *
 * @param[in, out] node is the node which should be scheduled.
 *
 * @param get_idle_node is the get idle node handler.
 *
 * @param arg is the get idle node handler argument.
 *
 * @retval SCHEDULER_SMP_DO_SCHEDULE The node shall be scheduled.
 *
 * @retval SCHEDULER_SMP_DO_NOT_SCHEDULE The node shall be blocked.  This
 *   action is returned, if the owner thread is already scheduled by another
 *   scheduler.
 */
static inline Scheduler_SMP_Action _Scheduler_SMP_Try_to_schedule(
  Scheduler_Node          *node,
  Scheduler_Get_idle_node  get_idle_node,
  void                    *arg
)
{
  ISR_lock_Context        lock_context;
  Thread_Control         *owner;
  Thread_Scheduler_state  owner_state;
  int                     owner_sticky_level;

  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );
  owner_state = owner->Scheduler.state;
  owner_sticky_level = node->sticky_level;

  if ( RTEMS_PREDICT_TRUE( owner_state == THREAD_SCHEDULER_READY ) ) {
    _Scheduler_SMP_Cancel_ask_for_help( owner );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Scheduler_release_critical( owner, &lock_context );
    return SCHEDULER_SMP_DO_SCHEDULE;
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );

  if (
    ( owner_state == THREAD_SCHEDULER_SCHEDULED && owner_sticky_level <= 1 ) ||
    owner_sticky_level == 0
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    return SCHEDULER_SMP_DO_NOT_SCHEDULE;
  }

  (void) _Scheduler_Use_idle_thread( node, get_idle_node, arg );

  return SCHEDULER_SMP_DO_SCHEDULE;
}

/**
 * @brief Allocates a processor to the user of the scheduled node.
 *
 * Attempts to prevent migrations but does not take into account affinity.
 *
 * @param[in, out] context is the scheduler context.
 *
 * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
 *
 * @param[in, out] cpu is the processor to allocate.
 */
static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  _Assert( _ISR_Get_level() != 0 );

  if ( cpu == scheduled_cpu ) {
    _Thread_Set_CPU( scheduled_thread, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );

    return;
  }

  if (
    _Thread_Is_executing_on_a_processor( scheduled_thread ) &&
    _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu )
  ) {
    Thread_Control *heir = scheduled_cpu->heir;
    _Thread_Dispatch_update_heir( cpu_self, scheduled_cpu, scheduled_thread );
    _Thread_Set_CPU( heir, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, heir );

    return;
  }

  _Thread_Set_CPU( scheduled_thread, cpu );
  _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}

/**
 * @brief Allocates exactly the processor to the user of the scheduled node.
 *
 * This method is slightly different from
 * _Scheduler_SMP_Allocate_processor_lazy() in that it does what it is asked to
 * do.  _Scheduler_SMP_Allocate_processor_lazy() attempts to prevent migrations
 * but does not take into account affinity.
 *
 * @param[in, out] context is the scheduler context.
 *
 * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
 *
 * @param[in, out] cpu is the processor to allocate.
 */
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, cpu );
  _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}

/**
 * @brief Allocates the processor to the user of the scheduled node using the
 *   given allocation handler.
 *
 * @param[in, out] context is the scheduler context.
 *
 * @param[in, out] scheduled is the scheduled node that gets the processor allocated.
 *
 * @param[in, out] cpu is the processor to allocate.
 *
 * @param allocate_processor is the handler which should allocate the processor.
 */
static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Per_CPU_Control                  *cpu,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *allocate_processor )( context, scheduled, cpu );
}

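/*
 * Illustrative sketch (an editorial addition, not upstream RTEMS code):
 * binding the allocation step to one of the handlers defined above.  A
 * scheduler that must honor processor affinity would pass the exact
 * allocator, as shown here; _Example_allocate_exact is a hypothetical name.
 */
static inline void _Example_allocate_exact(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
)
{
  /* Marks the node as scheduled and assigns exactly the requested CPU. */
  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    cpu,
    _Scheduler_SMP_Allocate_processor_exact
  );
}
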
/**
 * @brief Preempts the victim's thread and allocates a processor for the user
 *   of the scheduled node.
 *
 * @param[in, out] context is the scheduler context.
 *
 * @param[in, out] scheduled is the node of the user thread that is about to
 *   get a processor allocated.
 *
 * @param[in, out] victim is the victim node of the thread to preempt.
 *
 * @param[in, out] victim_idle is the idle thread used by the victim node or NULL.
 *
 * @param allocate_processor The function for allocation of a processor for the new thread.
 */
static inline void _Scheduler_SMP_Preempt(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Thread_Control                   *victim_idle,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control   *victim_owner;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *cpu;

  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  victim_owner = _Scheduler_Node_get_owner( victim );
  _Thread_Scheduler_acquire_critical( victim_owner, &lock_context );

  if ( RTEMS_PREDICT_TRUE( victim_idle == NULL ) ) {
    if ( victim_owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
      _Scheduler_Thread_change_state( victim_owner, THREAD_SCHEDULER_READY );

      if ( victim_owner->Scheduler.helping_nodes > 0 ) {
        _Scheduler_SMP_Request_ask_for_help( victim_owner );
      }
    }

    cpu = _Thread_Get_CPU( victim_owner );
  } else {
    cpu = _Thread_Get_CPU( victim_idle );
  }

  _Thread_Scheduler_release_critical( victim_owner, &lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    cpu,
    allocate_processor
  );
}

/**
 * @brief Returns the lowest member of the scheduled nodes.
 *
 * @param context The scheduler context instance.
 * @param filter This parameter is unused.
 *
 * @return The lowest scheduled node.
 */
static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_SMP_Context *self;
  Scheduler_Node        *lowest_scheduled;

  (void) filter;

  self = _Scheduler_SMP_Get_self( context );

  _Assert( !_Chain_Is_empty( &self->Scheduled ) );
  lowest_scheduled = (Scheduler_Node *) _Chain_Last( &self->Scheduled );

  _Assert(
    _Chain_Next( &lowest_scheduled->Node.Chain ) ==
      _Chain_Tail( &self->Scheduled )
  );

  return lowest_scheduled;
}

/**
 * @brief Tries to schedule the given node.
 *
 * Schedules the node, or blocks it if that is necessary.
 *
 * @param context The scheduler context instance.
 * @param[in, out] node The node to insert into the scheduled nodes.
 * @param priority The priority of @a node.
 * @param[in, out] lowest_scheduled The lowest member of the scheduled nodes.
 * @param insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 * @param get_idle_node is the get idle node handler.
 * @param release_idle_node is the release idle node handler.
 */
static inline void _Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Priority_Control                  priority,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node,
  Scheduler_Release_idle_node       release_idle_node
)
{
  Thread_Control      *lowest_scheduled_idle;
  Scheduler_SMP_Action action;

  lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
    lowest_scheduled,
    release_idle_node,
    context
  );

  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

  action = _Scheduler_SMP_Try_to_schedule( node, get_idle_node, context );

  if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
    _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      lowest_scheduled_idle,
      allocate_processor
    );

    ( *insert_scheduled )( context, node, priority );
  } else {
    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );

    if ( lowest_scheduled_idle != NULL ) {
      (void) _Scheduler_Use_idle_thread( lowest_scheduled, get_idle_node, context );
    }

    ( *move_from_ready_to_scheduled )( context, lowest_scheduled );
  }
}

/**
 * @brief Enqueues a node according to the specified order function.
 *
 * The node must not be in the scheduled state.
 *
 * @param context The scheduler instance context.
 * @param[in, out] node The node to enqueue.
 * @param insert_priority The node insert priority.
 * @param order The order function.
 * @param insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param move_from_scheduled_to_ready Function to move a node from the set
 *   of scheduled nodes to the set of ready nodes.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param get_lowest_scheduled Function to select the node from the
 *   scheduled nodes to replace.  It may not be possible to find one; in this
 *   case a pointer must be returned so that the order function returns false
 *   if this pointer is passed as the second argument to the order function.
 * @param allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 * @param get_idle_node is the get idle node handler.
 * @param release_idle_node is the release idle node handler.
 *
 * @retval true The node needs help.
 * @retval false The node does not need help.
 */
static inline bool _Scheduler_SMP_Enqueue(
  Scheduler_Context                  *context,
  Scheduler_Node                     *node,
  Priority_Control                    insert_priority,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Move                  move_from_ready_to_scheduled,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor,
  Scheduler_Get_idle_node             get_idle_node,
  Scheduler_Release_idle_node         release_idle_node
)
{
  bool            needs_help;
  Scheduler_Node *lowest_scheduled;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  if (
    ( *order )(
      &insert_priority,
      &node->Node.Chain,
      &lowest_scheduled->Node.Chain
    )
  ) {
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      insert_priority,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node,
      release_idle_node
    );
    needs_help = false;
  } else {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    ( *insert_ready )( context, node, insert_priority );
    needs_help = true;
  }

  return needs_help;
}

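/*
 * Illustrative sketch (an editorial addition, not upstream RTEMS code): how a
 * caller typically consumes the boolean result of an enqueue operation;
 * compare _Scheduler_SMP_Unblock() further below.  The enqueue handler is
 * passed in so that the sketch stays self-contained;
 * _Example_enqueue_and_ask_for_help is a hypothetical name.
 */
static inline void _Example_enqueue_and_ask_for_help(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Priority_Control       insert_priority,
  Scheduler_SMP_Enqueue  enqueue
)
{
  bool needs_help;

  needs_help = ( *enqueue )( context, node, insert_priority );

  /* The node ended up in the ready set only, so request help in order to
     let another processor pick it up during thread dispatch. */
  if ( needs_help && thread->Scheduler.helping_nodes > 0 ) {
    _Scheduler_SMP_Request_ask_for_help( thread );
  }
}
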
/**
 * @brief Enqueues a scheduled node according to the specified order
 * function.
 *
 * @param context The scheduler instance context.
 * @param[in, out] node The node to enqueue.
 * @param insert_priority The node insert priority.
 * @param order The order function.
 * @param extract_from_ready Function to extract a node from the set of
 *   ready nodes.
 * @param get_highest_ready Function to get the highest ready node.
 * @param insert_ready Function to insert a node into the set of ready
 *   nodes.
 * @param insert_scheduled Function to insert a node into the set of
 *   scheduled nodes.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *   of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *   based on the rules of the scheduler.
 * @param get_idle_node is the get idle node handler.
 * @param release_idle_node is the release idle node handler.
 */
static inline void _Scheduler_SMP_Enqueue_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *const node,
  Priority_Control                  insert_priority,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node,
  Scheduler_Release_idle_node       release_idle_node
)
{
  Thread_Control *node_idle;

  node_idle = _Scheduler_Release_idle_thread_if_necessary(
    node,
    release_idle_node,
    context
  );

  while ( true ) {
    Scheduler_Node       *highest_ready;
    Scheduler_SMP_Action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if (
      node->sticky_level > 0 && ( *order )(
        &insert_priority,
        &node->Node.Chain,
        &highest_ready->Node.Chain
      )
    ) {
      if ( node_idle != NULL ) {
        Thread_Control   *owner;
        ISR_lock_Context  lock_context;

        owner = _Scheduler_Node_get_owner( node );
        _Thread_Scheduler_acquire_critical( owner, &lock_context );

        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
          Per_CPU_Control *cpu;

          _Scheduler_SMP_Cancel_ask_for_help( owner );
          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
          cpu = _Thread_Get_CPU( node_idle );
          _Thread_Set_CPU( owner, cpu );
          _Thread_Scheduler_release_critical( owner, &lock_context );
          _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, owner );
        } else {
          Thread_Control *new_idle;

          _Thread_Scheduler_release_critical( owner, &lock_context );
          new_idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
          _Assert_Unused_variable_equals( new_idle, node_idle );
        }
      }

      ( *insert_scheduled )( context, node, insert_priority );

      return;
    }

    action = _Scheduler_SMP_Try_to_schedule(
      highest_ready,
      get_idle_node,
      context
    );

    if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        node_idle,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
      ( *insert_ready )( context, node, insert_priority );
      return;
    }

    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
    ( *extract_from_ready )( context, highest_ready );
  }
}

/**
 * @brief Extracts a scheduled node from the scheduled nodes.
 *
 * @param context This parameter is unused.
 * @param node The node to extract from the chain it belongs to.
 */
static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  (void) context;
  _Chain_Extract_unprotected( &node->Node.Chain );
}

/**
 * @brief Schedules the highest ready node.
 *
 * @param context The scheduler context instance.
 * @param victim The node of the thread that is replaced by the newly
 *      scheduled thread.
 * @param cpu is the processor to allocate.
 * @param extract_from_scheduled Function to extract a node from the set of
 *      scheduled nodes.
 * @param extract_from_ready Function to extract a node from the set of
 *      ready nodes.
 * @param get_highest_ready Function to get the highest ready node.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *      of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 * @param get_idle_node is the get idle node handler.
 */
static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *cpu,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node
)
{
  Scheduler_SMP_Action action;

  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_BLOCKED );
  ( *extract_from_scheduled )( context, victim );

  while ( true ) {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_SMP_Try_to_schedule(
      highest_ready,
      get_idle_node,
      context
    );

    if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        cpu,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
      return;
    }

    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
    ( *extract_from_ready )( context, highest_ready );
  }
}

/**
 * @brief Schedules the highest ready node and preempts a currently executing one.
 *
 * @param context The scheduler context instance.
 * @param victim The node of the thread that is preempted by the newly
 *      scheduled thread.
 * @param extract_from_ready Function to extract a node from the set of
 *      ready nodes.
 * @param get_highest_ready Function to get the highest ready node.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *      of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 * @param get_idle_node is the get idle node handler.
 * @param release_idle_node is the release idle node handler.
 */
static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node,
  Scheduler_Release_idle_node       release_idle_node
)
{
  Thread_Control      *victim_idle;
  Scheduler_SMP_Action action;

  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
  victim_idle = _Scheduler_Release_idle_thread_if_necessary(
    victim,
    release_idle_node,
    context
  );

  while ( true ) {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_SMP_Try_to_schedule(
      highest_ready,
      get_idle_node,
      context
    );

    if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        victim,
        victim_idle,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
      return;
    }

    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
    ( *extract_from_ready )( context, highest_ready );
  }
}

/**
 * @brief Blocks the thread.
 *
 * @param context The scheduler instance context.
 * @param[in, out] thread The thread of the scheduling operation.
 * @param[in, out] node The scheduler node of the thread to block.
 * @param extract_from_scheduled Function to extract a node from the set of
 *      scheduled nodes.
 * @param extract_from_ready Function to extract a node from the set of
 *      ready nodes.
 * @param get_highest_ready Function to get the highest ready node.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *      of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 * @param get_idle_node is the get idle node handler.
 */
static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node
)
{
  int                       sticky_level;
  ISR_lock_Context          lock_context;
  Scheduler_SMP_Node_state  node_state;
  Per_CPU_Control          *cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  _Scheduler_SMP_Cancel_ask_for_help( thread );
  cpu = _Thread_Get_CPU( thread );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( RTEMS_PREDICT_FALSE( sticky_level > 0 ) ) {
    if (
      node_state == SCHEDULER_SMP_NODE_SCHEDULED &&
      _Scheduler_Node_get_idle( node ) == NULL
    ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
      _Thread_Set_CPU( idle, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, idle );
    }

    return;
  }

  _Assert( _Scheduler_Node_get_user( node ) == thread );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      cpu,
      extract_from_scheduled,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }
}

/**
 * @brief Unblocks the thread.
 *
 * @param context The scheduler instance context.
 * @param[in, out] thread The thread of the scheduling operation.
 * @param[in, out] node The scheduler node of the thread to unblock.
 * @param update Function to update the node's priority to the new value.
 * @param enqueue Function to insert a node with a priority in the ready queue
 *      of a context.
 * @param release_idle_node is the release idle node handler.
 */
static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context          *context,
  Thread_Control             *thread,
  Scheduler_Node             *node,
  Scheduler_SMP_Update        update,
  Scheduler_SMP_Enqueue       enqueue,
  Scheduler_Release_idle_node release_idle_node
)
{
  Scheduler_SMP_Node_state  node_state;
  Priority_Control          priority;

  _Assert( _Chain_Is_node_off_chain( &thread->Scheduler.Help_node ) );

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( RTEMS_PREDICT_FALSE( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) ) {
    _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Discard_idle_thread(
      thread,
      node,
      release_idle_node,
      context
    );

    return;
  }

  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_READY );

  priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( priority );

  if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
    ( *update )( context, node, priority );
  }

  if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
    Priority_Control insert_priority;
    bool             needs_help;

    insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
    needs_help = ( *enqueue )( context, node, insert_priority );

    if ( needs_help && thread->Scheduler.helping_nodes > 0 ) {
      _Scheduler_SMP_Request_ask_for_help( thread );
    }
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_READY );
    _Assert( node->sticky_level > 0 );
    _Assert( node->idle == NULL );
    _Scheduler_SMP_Request_ask_for_help( thread );
  }
}

/**
 * @brief Updates the priority of the node and its position in the queues it
 * is in.
 *
 * This function first updates the priority of the node and then extracts
 * and reinserts it into the queue the node is part of, using the given
 * functions.
 *
 * @param context The scheduler instance context.
 * @param thread The thread for the operation.
 * @param[in, out] node The node to update the priority of.
 * @param extract_from_scheduled Function to extract a node from the set of
 *      scheduled nodes.
 * @param extract_from_ready Function to extract a node from the ready
 *      queue of the scheduler context.
 * @param update Function to update the priority of a node in the scheduler
 *      context.
 * @param enqueue Function to enqueue a node with a given priority.
 * @param enqueue_scheduled Function to enqueue a scheduled node.
 * @param ask_for_help Function to perform a help request.
 */
static inline void _Scheduler_SMP_Update_priority(
  Scheduler_Context              *context,
  Thread_Control                 *thread,
  Scheduler_Node                 *node,
  Scheduler_SMP_Extract           extract_from_scheduled,
  Scheduler_SMP_Extract           extract_from_ready,
  Scheduler_SMP_Update            update,
  Scheduler_SMP_Enqueue           enqueue,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled,
  Scheduler_SMP_Ask_for_help      ask_for_help
)
{
  Priority_Control         priority;
  Priority_Control         insert_priority;
  Scheduler_SMP_Node_state node_state;

  insert_priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );

  if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }

    return;
  }

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    ( *extract_from_scheduled )( context, node );
    ( *update )( context, node, priority );
    ( *enqueue_scheduled )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *update )( context, node, priority );
    ( *enqueue )( context, node, insert_priority );
  } else {
    ( *update )( context, node, priority );

    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }
  }
}

/**
 * @brief Performs a yield and asks for help if necessary.
 *
 * @param context The scheduler instance context.
 * @param thread The thread for the operation.
 * @param node The node of the thread that yields.
 * @param extract_from_scheduled Function to extract a node from the set of
 *      scheduled nodes.
 * @param extract_from_ready Function to extract a node from the ready
 *      queue of the scheduler context.
 * @param enqueue Function to enqueue a node with a given priority.
 * @param enqueue_scheduled Function to enqueue a scheduled node.
 */
static inline void _Scheduler_SMP_Yield(
  Scheduler_Context              *context,
  Thread_Control                 *thread,
  Scheduler_Node                 *node,
  Scheduler_SMP_Extract           extract_from_scheduled,
  Scheduler_SMP_Extract           extract_from_ready,
  Scheduler_SMP_Enqueue           enqueue,
  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled
)
{
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    ( *extract_from_scheduled )( context, node );
    ( *enqueue_scheduled )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    (void) ( *enqueue )( context, node, insert_priority );
  }
}

/**
 * @brief Inserts the node with the given priority into the scheduled nodes.
 *
 * @param context The scheduler instance context.
 * @param node_to_insert The scheduled node to insert.
 * @param priority_to_insert The priority with which to insert the node.
 */
static inline void _Scheduler_SMP_Insert_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   priority_to_insert
)
{
  Scheduler_SMP_Context *self;

  self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node.Chain,
    &priority_to_insert,
    _Scheduler_SMP_Priority_less_equal
  );
}

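/*
 * Illustrative usage sketch (an editorial addition, not upstream RTEMS code):
 * scheduled nodes of equal priority keep FIFO order, so a caller that wants
 * append semantics derives the insert priority with
 * SCHEDULER_PRIORITY_APPEND(), as the yield and unblock operations above do.
 * _Example_insert_scheduled_append is a hypothetical name.
 */
static inline void _Example_insert_scheduled_append(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  Priority_Control insert_priority;

  /* Map the node priority to its append variant for priority ties. */
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  _Scheduler_SMP_Insert_scheduled( context, node, insert_priority );
}
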
/**
 * @brief Asks for help.
 *
 * @param context The scheduler instance context.
 * @param thread The thread that asks for help.
 * @param[in, out] node The node of the thread that performs the ask for help
 *      operation.
 * @param order The order function.
 * @param insert_ready Function to insert a node into the set of ready
 *      nodes.
 * @param insert_scheduled Function to insert a node into the set of
 *      scheduled nodes.
 * @param move_from_scheduled_to_ready Function to move a node from the set
 *      of scheduled nodes to the set of ready nodes.
 * @param get_lowest_scheduled Function to select the node from the
 *      scheduled nodes to replace.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 * @param release_idle_node is the release idle node handler.
 *
 * @retval true The ask for help operation was successful.
 * @retval false The ask for help operation was not successful.
 */
static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Scheduler_Node                     *node,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor,
  Scheduler_Release_idle_node         release_idle_node
)
{
  Scheduler_Node   *lowest_scheduled;
  ISR_lock_Context  lock_context;
  bool              success;

  if ( thread->Scheduler.pinned_scheduler != NULL ) {
    /*
     * Pinned threads are not allowed to ask for help.  Return success to break
     * the loop in _Thread_Ask_for_help() early.
     */
    return true;
  }

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Scheduler_SMP_Node_state node_state;

    node_state = _Scheduler_SMP_Node_state( node );

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( node );

      if (
        ( *order )(
          &insert_priority,
          &node->Node.Chain,
          &lowest_scheduled->Node.Chain
        )
      ) {
        Thread_Control *lowest_scheduled_idle;

        _Scheduler_SMP_Cancel_ask_for_help( thread );
        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
        _Thread_Scheduler_release_critical( thread, &lock_context );

        lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
          lowest_scheduled,
          release_idle_node,
          context
        );

        _Scheduler_SMP_Preempt(
          context,
          node,
          lowest_scheduled,
          lowest_scheduled_idle,
          allocate_processor
        );

        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
        ( *insert_scheduled )( context, node, insert_priority );

        success = true;
      } else {
        _Thread_Scheduler_release_critical( thread, &lock_context );

        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
        ( *insert_ready )( context, node, insert_priority );
        success = false;
      }
    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Scheduler_SMP_Cancel_ask_for_help( thread );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );
      _Scheduler_Discard_idle_thread(
        thread,
        node,
        release_idle_node,
        context
      );
      success = true;
    } else {
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = false;
    }
  } else {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}

/**
 * @brief Reconsiders the help request of a thread.
 *
 * If the thread is scheduled through another scheduler node and the sticky
 * level of @a node is one, then @a node is no longer needed for helping: it
 * is changed to blocked and extracted from the ready queue.
 *
 * @param context The scheduler context instance.
 * @param thread The thread to reconsider the help request of.
 * @param[in, out] node The scheduler node of @a thread.
 * @param extract_from_ready Function to extract a node from the ready queue
 *      of the scheduler context.
 */
1707static inline void _Scheduler_SMP_Reconsider_help_request(
1708  Scheduler_Context     *context,
1709  Thread_Control        *thread,
1710  Scheduler_Node        *node,
1711  Scheduler_SMP_Extract  extract_from_ready
1712)
1713{
1714  ISR_lock_Context lock_context;
1715
1716  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1717
1718  if (
1719    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
1720      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
1721      && node->sticky_level == 1
1722  ) {
1723    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1724    ( *extract_from_ready )( context, node );
1725  }
1726
1727  _Thread_Scheduler_release_critical( thread, &lock_context );
1728}
1729
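/*
 * Usage sketch (illustrative): the reconsider help request operation of a
 * concrete scheduler only needs to supply its ready-queue extract callback.
 * _My_SMP_Extract_from_ready is a hypothetical stand-in.
 *
 * @code
 * static void _My_SMP_Reconsider_help_request(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *thread,
 *   Scheduler_Node          *node
 * )
 * {
 *   _Scheduler_SMP_Reconsider_help_request(
 *     _Scheduler_Get_context( scheduler ),
 *     thread,
 *     node,
 *     _My_SMP_Extract_from_ready
 *   );
 * }
 * @endcode
 */
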
/**
 * @brief Withdraws the node.
 *
 * @param context The scheduler context instance.
 * @param[in, out] thread The thread to change to @a next_state.
 * @param[in, out] node The node to withdraw.
 * @param next_state The new state for @a thread.
 * @param extract_from_scheduled Function to extract a node from the set of
 *      scheduled nodes.
 * @param extract_from_ready Function to extract a node from the ready queue
 *      of the scheduler context.
 * @param get_highest_ready Function to get the highest ready node.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *      of ready nodes to the set of scheduled nodes.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 * @param get_idle_node Function to get an idle node from the scheduler
 *      context.
 */
1747static inline void _Scheduler_SMP_Withdraw_node(
1748  Scheduler_Context                *context,
1749  Thread_Control                   *thread,
1750  Scheduler_Node                   *node,
1751  Thread_Scheduler_state            next_state,
1752  Scheduler_SMP_Extract             extract_from_scheduled,
1753  Scheduler_SMP_Extract             extract_from_ready,
1754  Scheduler_SMP_Get_highest_ready   get_highest_ready,
1755  Scheduler_SMP_Move                move_from_ready_to_scheduled,
1756  Scheduler_SMP_Allocate_processor  allocate_processor,
1757  Scheduler_Get_idle_node           get_idle_node
1758)
1759{
1760  ISR_lock_Context         lock_context;
1761  Scheduler_SMP_Node_state node_state;
1762
1763  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1764
1765  node_state = _Scheduler_SMP_Node_state( node );
1766
1767  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1768    Per_CPU_Control *cpu;
1769
1770    _Assert( thread == _Scheduler_Node_get_user( node ) );
1771    cpu = _Thread_Get_CPU( thread );
1772    _Scheduler_Thread_change_state( thread, next_state );
1773    _Thread_Scheduler_release_critical( thread, &lock_context );
1774
1775    _Assert( _Scheduler_Node_get_user( node ) == thread );
1776    _Assert( _Scheduler_Node_get_idle( node ) == NULL );
1777
1778    _Scheduler_SMP_Schedule_highest_ready(
1779      context,
1780      node,
1781      cpu,
1782      extract_from_scheduled,
1783      extract_from_ready,
1784      get_highest_ready,
1785      move_from_ready_to_scheduled,
1786      allocate_processor,
1787      get_idle_node
1788    );
1789  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1790    _Thread_Scheduler_release_critical( thread, &lock_context );
1791    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
1792    ( *extract_from_ready )( context, node );
1793  } else {
1794    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
1795    _Thread_Scheduler_release_critical( thread, &lock_context );
1796  }
1797}
1798
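/*
 * Usage sketch (illustrative): a withdraw operation forwards the next thread
 * scheduler state, for example THREAD_SCHEDULER_BLOCKED, together with the
 * scheduler-specific callbacks (the _My_SMP_* names are hypothetical).
 *
 * @code
 * static void _My_SMP_Withdraw_node(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *thread,
 *   Scheduler_Node          *node,
 *   Thread_Scheduler_state   next_state
 * )
 * {
 *   _Scheduler_SMP_Withdraw_node(
 *     _Scheduler_Get_context( scheduler ),
 *     thread,
 *     node,
 *     next_state,
 *     _Scheduler_SMP_Extract_from_scheduled,
 *     _My_SMP_Extract_from_ready,
 *     _My_SMP_Get_highest_ready,
 *     _My_SMP_Move_from_ready_to_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy,
 *     _My_SMP_Get_idle
 *   );
 * }
 * @endcode
 */
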
/**
 * @brief Makes the node sticky.
 *
 * @param scheduler is the scheduler of the node.
 *
 * @param[in, out] the_thread is the thread owning the node.
 *
 * @param[in, out] node is the scheduler node to make sticky.
 *
 * @param update is the function to update the priority of the node in the
 *   scheduler context.
 *
 * @param enqueue is the function to enqueue the node with a given priority.
 */
1808static inline void _Scheduler_SMP_Make_sticky(
1809  const Scheduler_Control *scheduler,
1810  Thread_Control          *the_thread,
1811  Scheduler_Node          *node,
1812  Scheduler_SMP_Update     update,
1813  Scheduler_SMP_Enqueue    enqueue
1814)
1815{
1816  Scheduler_SMP_Node_state node_state;
1817
1818  node_state = _Scheduler_SMP_Node_state( node );
1819
1820  if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1821    Scheduler_Context *context;
1822    Priority_Control   insert_priority;
1823    Priority_Control   priority;
1824
1825    context = _Scheduler_Get_context( scheduler );
1826    priority = _Scheduler_Node_get_priority( node );
1827    priority = SCHEDULER_PRIORITY_PURIFY( priority );
1828
1829    if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
1830      ( *update )( context, node, priority );
1831    }
1832
1833    insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
1834    (void) ( *enqueue )( context, node, insert_priority );
1835  }
1836}
1837
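/*
 * Usage sketch (illustrative): a make sticky operation passes the update and
 * enqueue callbacks of the scheduler (the _My_SMP_* names are hypothetical).
 * If the node is blocked, it is enqueued in append order so that it takes
 * part in scheduling again.
 *
 * @code
 * static void _My_SMP_Make_sticky(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *the_thread,
 *   Scheduler_Node          *node
 * )
 * {
 *   _Scheduler_SMP_Make_sticky(
 *     scheduler,
 *     the_thread,
 *     node,
 *     _My_SMP_Do_update,
 *     _My_SMP_Enqueue
 *   );
 * }
 * @endcode
 */
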
/**
 * @brief Cleans the sticky property from the node.
 *
 * @param scheduler is the scheduler of the node.
 *
 * @param[in, out] the_thread is the thread owning the node.
 *
 * @param[in, out] node is the scheduler node to clean the sticky property of.
 *
 * @param extract_from_scheduled is the function to extract a node from the
 *   set of scheduled nodes.
 *
 * @param extract_from_ready is the function to extract a node from the ready
 *   queue of the scheduler context.
 *
 * @param get_highest_ready is the function to get the highest ready node.
 *
 * @param move_from_ready_to_scheduled is the function to move a node from
 *   the set of ready nodes to the set of scheduled nodes.
 *
 * @param allocate_processor is the function to allocate a processor to a
 *   node based on the rules of the scheduler.
 *
 * @param get_idle_node is the function to get an idle node.
 *
 * @param release_idle_node is the function to release an idle node.
 */
1847static inline void _Scheduler_SMP_Clean_sticky(
1848  const Scheduler_Control          *scheduler,
1849  Thread_Control                   *the_thread,
1850  Scheduler_Node                   *node,
1851  Scheduler_SMP_Extract             extract_from_scheduled,
1852  Scheduler_SMP_Extract             extract_from_ready,
1853  Scheduler_SMP_Get_highest_ready   get_highest_ready,
1854  Scheduler_SMP_Move                move_from_ready_to_scheduled,
1855  Scheduler_SMP_Allocate_processor  allocate_processor,
1856  Scheduler_Get_idle_node           get_idle_node,
1857  Scheduler_Release_idle_node       release_idle_node
1858)
1859{
1860  Scheduler_SMP_Node_state node_state;
1861
1862  node_state = _Scheduler_SMP_Node_state( node );
1863
1864  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1865    Thread_Control *idle;
1866
1867    idle = _Scheduler_Node_get_idle( node );
1868
1869    if ( idle != NULL ) {
1870      Scheduler_Context *context;
1871
1872      context = _Scheduler_Get_context( scheduler );
1873
1874      _Scheduler_Release_idle_thread( node, idle, release_idle_node, context );
1875      _Scheduler_SMP_Schedule_highest_ready(
1876        context,
1877        node,
1878        _Thread_Get_CPU( idle ),
1879        extract_from_scheduled,
1880        extract_from_ready,
1881        get_highest_ready,
1882        move_from_ready_to_scheduled,
1883        allocate_processor,
1884        get_idle_node
1885      );
1886    }
1887  }
1888}
1889
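/*
 * Usage sketch (illustrative): the clean sticky operation supplies the full
 * callback set, since releasing an idle thread may require scheduling the
 * highest ready node on the freed processor (the _My_SMP_* names are
 * hypothetical).
 *
 * @code
 * static void _My_SMP_Clean_sticky(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *the_thread,
 *   Scheduler_Node          *node
 * )
 * {
 *   _Scheduler_SMP_Clean_sticky(
 *     scheduler,
 *     the_thread,
 *     node,
 *     _Scheduler_SMP_Extract_from_scheduled,
 *     _My_SMP_Extract_from_ready,
 *     _My_SMP_Get_highest_ready,
 *     _My_SMP_Move_from_ready_to_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy,
 *     _My_SMP_Get_idle,
 *     _My_SMP_Release_idle
 *   );
 * }
 * @endcode
 */
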
1890/**
1891 * @brief Starts the idle thread on the given processor.
1892 *
1893 * @param context The scheduler context instance.
1894 * @param[in, out] idle The idle thread to schedule.
1895 * @param cpu The processor for the idle thread.
1896 * @param register_idle Function to register the idle thread for a cpu.
1897 */
1898static inline void _Scheduler_SMP_Do_start_idle(
1899  Scheduler_Context           *context,
1900  Thread_Control              *idle,
1901  Per_CPU_Control             *cpu,
1902  Scheduler_SMP_Register_idle  register_idle
1903)
1904{
1905  Scheduler_SMP_Context *self;
1906  Scheduler_SMP_Node    *node;
1907
1908  self = _Scheduler_SMP_Get_self( context );
1909  node = _Scheduler_SMP_Thread_get_node( idle );
1910
1911  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
1912  node->state = SCHEDULER_SMP_NODE_SCHEDULED;
1913
1914  _Thread_Set_CPU( idle, cpu );
1915  ( *register_idle )( context, &node->Base, cpu );
1916  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
1917}
1918
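/*
 * Usage sketch (illustrative): the start idle operation of a scheduler
 * typically just forwards to this function with its register idle callback
 * (_My_SMP_Register_idle is a hypothetical stand-in).
 *
 * @code
 * static void _My_SMP_Start_idle(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *idle,
 *   Per_CPU_Control         *cpu
 * )
 * {
 *   _Scheduler_SMP_Do_start_idle(
 *     _Scheduler_Get_context( scheduler ),
 *     idle,
 *     cpu,
 *     _My_SMP_Register_idle
 *   );
 * }
 * @endcode
 */
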
/**
 * @brief Adds a processor to the scheduler context, using the idle thread
 *   of the processor.
 *
 * @param context The scheduler context instance.
 * @param[in, out] idle The idle thread of the processor to add.
 * @param has_ready Function that checks if a given context has ready threads.
 * @param enqueue_scheduled Function to enqueue a scheduled node.
 * @param register_idle Function to register the idle thread for a cpu.
 */
1928static inline void _Scheduler_SMP_Add_processor(
1929  Scheduler_Context              *context,
1930  Thread_Control                 *idle,
1931  Scheduler_SMP_Has_ready         has_ready,
1932  Scheduler_SMP_Enqueue_scheduled enqueue_scheduled,
1933  Scheduler_SMP_Register_idle     register_idle
1934)
1935{
1936  Scheduler_SMP_Context *self;
1937  Scheduler_Node        *node;
1938
1939  self = _Scheduler_SMP_Get_self( context );
1940  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1941  node = _Thread_Scheduler_get_home_node( idle );
1942  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
1943  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
1944
1945  if ( ( *has_ready )( &self->Base ) ) {
1946    Priority_Control insert_priority;
1947
1948    insert_priority = _Scheduler_SMP_Node_priority( node );
1949    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
1950    ( *enqueue_scheduled )( &self->Base, node, insert_priority );
1951  } else {
1952    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
1953  }
1954}
1955
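/*
 * Usage sketch (illustrative): an add processor operation supplies the has
 * ready, enqueue scheduled, and register idle callbacks of the scheduler
 * (the _My_SMP_* names are hypothetical).
 *
 * @code
 * static void _My_SMP_Add_processor(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *idle
 * )
 * {
 *   _Scheduler_SMP_Add_processor(
 *     _Scheduler_Get_context( scheduler ),
 *     idle,
 *     _My_SMP_Has_ready,
 *     _My_SMP_Enqueue_scheduled,
 *     _My_SMP_Register_idle
 *   );
 * }
 * @endcode
 */
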
/**
 * @brief Removes the processor from the scheduler context and returns its
 *   idle thread.
 *
 * @param context The scheduler context instance.
 * @param cpu The processor to remove.
 * @param extract_from_scheduled Function to extract a node from the set of
 *      scheduled nodes.
 * @param extract_from_ready Function to extract a node from the ready queue
 *      of the scheduler context.
 * @param enqueue Function to enqueue a node with a given priority.
 * @param get_idle_node Function to get an idle node from the scheduler
 *      context.
 * @param release_idle_node Function to release an idle node to the scheduler
 *      context.
 *
 * @return The idle thread of @a cpu.
 */
1969static inline Thread_Control *_Scheduler_SMP_Remove_processor(
1970  Scheduler_Context          *context,
1971  Per_CPU_Control            *cpu,
1972  Scheduler_SMP_Extract       extract_from_scheduled,
1973  Scheduler_SMP_Extract       extract_from_ready,
1974  Scheduler_SMP_Enqueue       enqueue,
1975  Scheduler_Get_idle_node     get_idle_node,
1976  Scheduler_Release_idle_node release_idle_node
1977)
1978{
1979  Scheduler_SMP_Context *self;
1980  Chain_Node            *chain_node;
1981  Scheduler_Node        *victim_node;
1982  Thread_Control        *victim_user;
1983  Thread_Control        *victim_owner;
1984  Thread_Control        *idle;
1985
1986  self = _Scheduler_SMP_Get_self( context );
1987  chain_node = _Chain_First( &self->Scheduled );
1988
1989  do {
1990    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
1991    victim_node = (Scheduler_Node *) chain_node;
1992    victim_user = _Scheduler_Node_get_user( victim_node );
1993    chain_node = _Chain_Next( chain_node );
1994  } while ( _Thread_Get_CPU( victim_user ) != cpu );
1995
1996  ( *extract_from_scheduled )( &self->Base, victim_node );
1997  victim_owner = _Scheduler_Node_get_owner( victim_node );
1998
1999  if ( !victim_owner->is_idle ) {
2000    Thread_Control  *victim_idle;
2001    Scheduler_Node  *idle_node;
2002    Priority_Control insert_priority;
2003
2004    victim_idle = _Scheduler_Release_idle_thread_if_necessary(
2005      victim_node,
2006      release_idle_node,
2007      &self->Base
2008    );
2009    idle_node = ( *get_idle_node )( &self->Base );
2010    idle = _Scheduler_Node_get_owner( idle_node );
2011    _Scheduler_SMP_Preempt(
2012      &self->Base,
2013      idle_node,
2014      victim_node,
2015      victim_idle,
2016      _Scheduler_SMP_Allocate_processor_exact
2017    );
2018
2019    _Assert( !_Chain_Is_empty( &self->Scheduled ) );
2020    insert_priority = _Scheduler_SMP_Node_priority( victim_node );
2021    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
2022    ( *enqueue )( &self->Base, victim_node, insert_priority );
2023  } else {
2024    _Assert( victim_owner == victim_user );
2025    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
2026    idle = victim_owner;
2027  }
2028
2029  return idle;
2030}
2031
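/*
 * Usage sketch (illustrative): a remove processor operation returns the idle
 * thread of the removed processor to the caller (the _My_SMP_* names are
 * hypothetical).
 *
 * @code
 * static Thread_Control *_My_SMP_Remove_processor(
 *   const Scheduler_Control *scheduler,
 *   Per_CPU_Control         *cpu
 * )
 * {
 *   return _Scheduler_SMP_Remove_processor(
 *     _Scheduler_Get_context( scheduler ),
 *     cpu,
 *     _Scheduler_SMP_Extract_from_scheduled,
 *     _My_SMP_Extract_from_ready,
 *     _My_SMP_Enqueue,
 *     _My_SMP_Get_idle,
 *     _My_SMP_Release_idle
 *   );
 * }
 * @endcode
 */
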
/**
 * @brief Sets the affinity of the node.
 *
 * If the node is currently scheduled or ready, it is also extracted from and
 * reinserted into the corresponding set with its new affinity.
 *
 * @param context The scheduler context instance.
 * @param thread The thread for the operation.
 * @param[in, out] node The node to set the affinity of.
 * @param arg The affinity for @a node.
 * @param set_affinity Function to set the affinity of a node.
 * @param extract_from_scheduled Function to extract a node from the set of
 *      scheduled nodes.
 * @param extract_from_ready Function to extract a node from the ready queue
 *      of the scheduler context.
 * @param get_highest_ready Function to get the highest ready node.
 * @param move_from_ready_to_scheduled Function to move a node from the set
 *      of ready nodes to the set of scheduled nodes.
 * @param enqueue Function to enqueue a node with a given priority.
 * @param allocate_processor Function to allocate a processor to a node
 *      based on the rules of the scheduler.
 * @param get_idle_node Function to get an idle node from the scheduler
 *      context.
 * @param release_idle_node Function to release an idle node to the scheduler
 *      context.
 */
2053static inline void _Scheduler_SMP_Set_affinity(
2054  Scheduler_Context               *context,
2055  Thread_Control                  *thread,
2056  Scheduler_Node                  *node,
2057  void                            *arg,
2058  Scheduler_SMP_Set_affinity       set_affinity,
2059  Scheduler_SMP_Extract            extract_from_scheduled,
2060  Scheduler_SMP_Extract            extract_from_ready,
2061  Scheduler_SMP_Get_highest_ready  get_highest_ready,
2062  Scheduler_SMP_Move               move_from_ready_to_scheduled,
2063  Scheduler_SMP_Enqueue            enqueue,
2064  Scheduler_SMP_Allocate_processor allocate_processor,
2065  Scheduler_Get_idle_node          get_idle_node,
2066  Scheduler_Release_idle_node      release_idle_node
2067)
2068{
2069  Scheduler_SMP_Node_state node_state;
2070  Priority_Control         insert_priority;
2071
2072  node_state = _Scheduler_SMP_Node_state( node );
2073  insert_priority = _Scheduler_SMP_Node_priority( node );
2074  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
2075
2076  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
2077    ( *extract_from_scheduled )( context, node );
2078    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
2079      context,
2080      node,
2081      extract_from_ready,
2082      get_highest_ready,
2083      move_from_ready_to_scheduled,
2084      allocate_processor,
2085      get_idle_node,
2086      release_idle_node
2087    );
2088    ( *set_affinity )( context, node, arg );
2089    ( *enqueue )( context, node, insert_priority );
2090  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
2091    ( *extract_from_ready )( context, node );
2092    ( *set_affinity )( context, node, arg );
2093    ( *enqueue )( context, node, insert_priority );
2094  } else {
2095    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
2096    ( *set_affinity )( context, node, arg );
2097  }
2098}
2099
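/*
 * Usage sketch (illustrative): a set affinity operation passes the new
 * affinity through the arg parameter to its scheduler-specific set affinity
 * callback (the _My_SMP_* names are hypothetical).
 *
 * @code
 * static void _My_SMP_Set_affinity(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *thread,
 *   Scheduler_Node          *node,
 *   Processor_mask          *affinity
 * )
 * {
 *   _Scheduler_SMP_Set_affinity(
 *     _Scheduler_Get_context( scheduler ),
 *     thread,
 *     node,
 *     affinity,
 *     _My_SMP_Do_set_affinity,
 *     _Scheduler_SMP_Extract_from_scheduled,
 *     _My_SMP_Extract_from_ready,
 *     _My_SMP_Get_highest_ready,
 *     _My_SMP_Move_from_ready_to_scheduled,
 *     _My_SMP_Enqueue,
 *     _Scheduler_SMP_Allocate_processor_exact,
 *     _My_SMP_Get_idle,
 *     _My_SMP_Release_idle
 *   );
 * }
 * @endcode
 */
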
2100/** @} */
2101
2102#ifdef __cplusplus
2103}
2104#endif /* __cplusplus */
2105
2106#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */