source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 0e754fac

Last change: 0e754fac, checked in by Sebastian Huber <sebastian.huber@…> on 10/21/16 at 12:41:19

score: Delete unused scheduler ask for help X op

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2016 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
  uint32_t cpu_index = _Per_CPU_Get_index( cpu );

  return _Scheduler_Get_by_CPU_index( cpu_index );
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.node;
#else
  return the_thread->Scheduler.nodes;
#endif
}

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread which state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  Thread_Control          *needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( needs_help != the_thread ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  Thread_Control          *needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( needs_help != the_thread ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
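
/*
 * Illustrative sketch only, not part of the RTEMS API: the helper below
 * merely demonstrates the bijectivity requirement documented for
 * _Scheduler_Map_priority() and _Scheduler_Unmap_priority().  The guard
 * macro RTEMS_SCHEDULER_EXAMPLES and the helper name are hypothetical.
 */
#if defined(RTEMS_SCHEDULER_EXAMPLES)
RTEMS_INLINE_ROUTINE bool _Scheduler_Example_priority_round_trip(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  /* Must hold for every priority in the closed interval [0, M] */
  return _Scheduler_Unmap_priority(
    scheduler,
    _Scheduler_Map_priority( scheduler, priority )
  ) == priority;
}
#endif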

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
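
/*
 * Illustrative sketch only, not part of the RTEMS API: scheduler nodes
 * follow a strict initialize/destroy pairing, as required by the
 * documentation above.  The guard macro RTEMS_SCHEDULER_EXAMPLES and the
 * helper name are hypothetical.
 */
#if defined(RTEMS_SCHEDULER_EXAMPLES)
RTEMS_INLINE_ROUTINE void _Scheduler_Example_node_lifetime(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_initialize( scheduler, node, the_thread, priority );

  /* ... the node may now participate in scheduler operations ... */

  _Scheduler_Node_destroy( scheduler, node );
}
#endif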

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers
 * which support standard RTEMS features, this includes time-slicing
 * management.
 *
 * @param[in] cpu The processor.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)

RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Scheduler_Control *scheduler_of_cpu =
      _Scheduler_Get_by_CPU_index( cpu_index );

    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
          && scheduler != scheduler_of_cpu ) );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
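
/*
 * Illustrative sketch only, not part of the RTEMS API: a concrete scheduler
 * block operation typically delegates to _Scheduler_Generic_block() with its
 * own extract and schedule operations.  The guard macro
 * RTEMS_SCHEDULER_EXAMPLES and all _Scheduler_Example_* names are
 * hypothetical.
 */
#if defined(RTEMS_SCHEDULER_EXAMPLES)
void _Scheduler_Example_extract(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
);

void _Scheduler_Example_schedule(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  bool                     force_dispatch
);

RTEMS_INLINE_ROUTINE void _Scheduler_Example_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_Generic_block(
    scheduler,
    the_thread,
    node,
    _Scheduler_Example_extract,
    _Scheduler_Example_schedule
  );
}
#endif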

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}
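
/*
 * Illustrative sketch only, not part of the RTEMS API: scheduler identifiers
 * and scheduler indices are inverse to each other, so the round trip below
 * holds for every valid scheduler index.  The guard macro
 * RTEMS_SCHEDULER_EXAMPLES and the helper name are hypothetical.
 */
#if defined(RTEMS_SCHEDULER_EXAMPLES)
RTEMS_INLINE_ROUTINE bool _Scheduler_Example_id_round_trip(
  uint32_t scheduler_index
)
{
  Objects_Id id = _Scheduler_Build_id( scheduler_index );

  return _Scheduler_Get_index_by_id( id ) == scheduler_index;
}
#endif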

RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
{
  const Scheduler_Control *scheduler;
  bool ok = _Scheduler_Get_by_id( id, &scheduler );

  (void) scheduler;

  return ok;
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  the_thread->Scheduler.node = node;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}

extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
 * helping state may use an idle thread for the scheduler node owned by itself
 * in case it executes currently using another scheduler node or in case it is
 * in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );

  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The action indicating how to proceed, see
 *   Scheduler_Try_to_schedule_action.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;
  Thread_Control                   *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  user = _Scheduler_Node_get_user( node );

  _Thread_Scheduler_acquire_critical( user, &lock_context );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }

    _Thread_Scheduler_release_critical( user, &lock_context );
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      if ( idle != NULL ) {
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
  }

  _Thread_Scheduler_release_critical( user, &lock_context );
  return action;
}

/**
 * @brief Release an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context  lock_context;
  Thread_Control   *old_user;
  Thread_Control   *new_user;
  Per_CPU_Control  *thread_cpu;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    _Assert( thread == _Scheduler_Node_get_user( node ) );

    return thread_cpu;
  }

  new_user = NULL;

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( is_scheduled ) {
      _Assert( thread == _Scheduler_Node_get_user( node ) );
      old_user = thread;
      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( is_scheduled ) {
      old_user = _Scheduler_Node_get_user( node );

      if ( thread == old_user ) {
        Thread_Control *owner = _Scheduler_Node_get_owner( node );

        if (
          thread != owner
            && owner->Scheduler.state == THREAD_SCHEDULER_READY
        ) {
          new_user = owner;
          _Scheduler_Node_set_user( node, new_user );
        } else {
          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
        }
      }
    }
  } else {
    /* Not implemented, this is part of the OMIP support path. */
    _Assert( 0 );
  }

  if ( new_user != NULL ) {
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
  }

  return NULL;
}

/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  if ( is_scheduled ) {
    Thread_Control *old_user = _Scheduler_Node_get_user( node );
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );
    Thread_Control *owner = _Scheduler_Node_get_owner( node );
    Thread_Control *new_user;

    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
      _Assert( idle != NULL );
      new_user = the_thread;
    } else if ( idle != NULL ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      new_user = the_thread;
    } else if ( the_thread != owner ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
      new_user = the_thread;
      _Scheduler_Node_set_user( node, new_user );
    } else {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
      new_user = NULL;
    }

    if ( new_user != NULL ) {
      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Set_CPU( new_user, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
    }

    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );

    unblock = true;
  }

  return unblock;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;

  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  _Thread_Scheduler_process_requests( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Wait_nodes,
    &new_scheduler_node->Thread.Wait_node
  );
  _Chain_Extract_unprotected(
    &old_scheduler_node->Thread.Scheduler_node.Chain
  );
  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Scheduler_nodes,
    &new_scheduler_node->Thread.Scheduler_node.Chain
  );

  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      the_thread->Scheduler.own_node = new_scheduler_node;
      the_thread->Scheduler.node = new_scheduler_node;
      _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */