source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 07a32d19

5
Last change on this file since 07a32d19 was 72e0bdb, checked in by Sebastian Huber <sebastian.huber@…>, on 10/10/16 at 12:50:19

score: Pass scheduler node to unblock operation

Changed for consistency with other scheduler operations.

Update #2556.

  • Property mode set to 100644
File size: 40.8 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
5 *
6 * This inline file contains all of the inlined routines associated with
7 * the manipulation of the scheduler.
8 */
9
10/*
11 *  Copyright (C) 2010 Gedare Bloom.
12 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
13 *  Copyright (c) 2014, 2016 embedded brains GmbH
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.org/license/LICENSE.
18 */
19
20#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
21#define _RTEMS_SCORE_SCHEDULERIMPL_H
22
23#include <rtems/score/scheduler.h>
24#include <rtems/score/cpusetimpl.h>
25#include <rtems/score/priorityimpl.h>
26#include <rtems/score/smpimpl.h>
27#include <rtems/score/status.h>
28#include <rtems/score/threadimpl.h>
29
30#ifdef __cplusplus
31extern "C" {
32#endif
33
34/**
35 * @addtogroup ScoreScheduler
36 */
37/**@{**/
38
39/**
40 *  @brief Initializes the scheduler to the policy chosen by the user.
41 *
42 *  This routine initializes the scheduler to the policy chosen by the user
43 *  through confdefs, or to the priority scheduler with ready chains by
44 *  default.
45 */
46void _Scheduler_Handler_initialization( void );
47
48RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
49  const Scheduler_Control *scheduler
50)
51{
52  return scheduler->context;
53}
54
/**
 * @brief Returns the scheduler instance of the thread.
 *
 * On SMP configurations this is the current scheduler control of the thread;
 * on uni-processor configurations there is exactly one scheduler instance,
 * the first (and only) entry of the scheduler table.
 *
 * @param[in] the_thread The thread of interest.
 *
 * @return The scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
67
/**
 * @brief Returns the own scheduler instance of the thread.
 *
 * On SMP configurations the own scheduler instance may differ from the
 * current one while the thread participates in the scheduler helping
 * protocol.  On uni-processor configurations there is exactly one scheduler
 * instance.
 *
 * @param[in] the_thread The thread of interest.
 *
 * @return The own scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
80
/**
 * @brief Returns the scheduler instance assigned to the processor index.
 *
 * @param[in] cpu_index The processor index.
 *
 * @return The scheduler instance assigned to this processor; on SMP
 *   configurations this may be NULL in case no scheduler is assigned to
 *   the processor (see _Scheduler_Tick() for a NULL check).
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}
93
94RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
95  const Per_CPU_Control *cpu
96)
97{
98  uint32_t cpu_index = _Per_CPU_Get_index( cpu );
99
100  return _Scheduler_Get_by_CPU_index( cpu_index );
101}
102
103ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
104
/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * There is currently a single global scheduler lock, so the scheduler
 * parameter is unused; it is kept to allow per-instance locking later.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  (void) scheduler;
  _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
}
121
/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * Counterpart of _Scheduler_Acquire_critical(); releases the single global
 * scheduler lock, so the scheduler parameter is unused.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  (void) scheduler;
  _ISR_lock_Release( &_Scheduler_Lock, lock_context );
}
138
/**
 * @brief Returns the scheduler node currently used by the thread.
 *
 * @param[in] the_thread The thread of interest.
 *
 * @return The scheduler node of the thread.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.node;
#else
  return the_thread->Scheduler.nodes;
#endif
}
149
150/**
151 * The preferred method to add a new scheduler is to define the jump table
152 * entries and add a case to the _Scheduler_Initialize routine.
153 *
154 * Generic scheduling implementations that rely on the ready queue only can
155 * be found in the _Scheduler_queue_XXX functions.
156 */
157
/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */
164
165/**
166 * @brief General scheduling decision.
167 *
168 * This kernel routine implements the scheduling decision logic for
169 * the scheduler. It does NOT dispatch.
170 *
171 * @param[in] the_thread The thread which state changed previously.
172 */
173RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
174{
175  const Scheduler_Control *scheduler;
176  ISR_lock_Context         lock_context;
177
178  scheduler = _Scheduler_Get( the_thread );
179  _Scheduler_Acquire_critical( scheduler, &lock_context );
180
181  ( *scheduler->Operations.schedule )( scheduler, the_thread );
182
183  _Scheduler_Release_critical( scheduler, &lock_context );
184}
185
186#if defined(RTEMS_SMP)
/**
 * @brief Iteration context for _Scheduler_Ask_for_help_visitor().
 */
typedef struct {
  Thread_Control *needs_help;      /* Thread currently in need for help */
  Thread_Control *next_needs_help; /* New thread in need for help, if any */
} Scheduler_Ask_for_help_context;
191
/**
 * @brief Resource tree visitor used by _Scheduler_Ask_for_help_X().
 *
 * Asks the own scheduler instance of the thread corresponding to the
 * resource node for help on behalf of the thread currently needing help.
 *
 * @param[in] resource_node The resource node of a thread offering help.
 * @param[in] arg Pointer to a Scheduler_Ask_for_help_context.
 *
 * @retval true Stop the iteration; the ask-for-help operation produced a
 *   new thread in need for help.
 * @retval false Continue the iteration.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
  Resource_Node *resource_node,
  void          *arg
)
{
  bool done;
  Scheduler_Ask_for_help_context *help_context = arg;
  Thread_Control *previous_needs_help = help_context->needs_help;
  Thread_Control *next_needs_help;
  Thread_Control *offers_help =
    THREAD_RESOURCE_NODE_TO_THREAD( resource_node );
  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );

  next_needs_help = ( *scheduler->Operations.ask_for_help_X )(
    scheduler,
    offers_help,
    previous_needs_help
  );

  /* A changed thread in need for help terminates this iteration */
  done = next_needs_help != previous_needs_help;

  if ( done ) {
    help_context->next_needs_help = next_needs_help;
  }

  return done;
}
219
/**
 * @brief Ask threads depending on resources owned by the thread for help.
 *
 * A thread is in need for help if it lost its assigned processor due to
 * pre-emption by a higher priority thread or it was not possible to assign it
 * a processor since its priority is too low on its current scheduler instance.
 *
 * The run-time of this function depends on the size of the resource tree of
 * the thread needing help and other resource trees in case threads in need for
 * help are produced during this operation.
 *
 * @param[in] needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_X(
  Thread_Control *needs_help
)
{
  do {
    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );

    /* First ask the own scheduler instance of the thread itself */
    needs_help = ( *scheduler->Operations.ask_for_help_X )(
      scheduler,
      needs_help,
      needs_help
    );

    if ( needs_help != NULL ) {
      /* Then walk the resource tree of the thread still needing help */
      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };

      _Resource_Iterate(
        &needs_help->Resource_node,
        _Scheduler_Ask_for_help_visitor,
        &help_context
      );

      needs_help = help_context.next_needs_help;
    }
  } while ( needs_help != NULL );
}
259
/**
 * @brief Asks for help if the thread needs it and owns resources.
 *
 * The ask-for-help operation is skipped in case the thread is already an
 * active rival using its own scheduler node, since in this situation no
 * additional help can be obtained.
 *
 * @param[in] needs_help The thread possibly needing help, may be NULL.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  Thread_Control *needs_help
)
{
  if (
    needs_help != NULL
      && _Resource_Node_owns_resources( &needs_help->Resource_node )
  ) {
    Scheduler_Node *node = _Thread_Scheduler_get_own_node( needs_help );

    if (
      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
        || _Scheduler_Node_get_user( node ) != needs_help
    ) {
      _Scheduler_Ask_for_help_X( needs_help );
    }
  }
}
278#endif
279
/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /*
   * On SMP configurations the yield operation yields a potential thread in
   * need for help.  The assignment deliberately spans the conditional
   * compilation so that one operation call serves both configurations.
   */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
314
/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
341
/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* On SMP the unblock operation may yield a thread in need for help */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
378
/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * Note that this routine uses the own scheduler instance of the thread, not
 * the current one, see _Scheduler_Get_own().
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
  const Scheduler_Control *own_scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  own_scheduler = _Scheduler_Get_own( the_thread );
  _Scheduler_Acquire_critical( own_scheduler, &lock_context );

  /* On SMP the update operation may yield a thread in need for help */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *own_scheduler->Operations.update_priority )(
    own_scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( own_scheduler, &lock_context );
}
419
420/**
421 * @brief Maps a thread priority from the user domain to the scheduler domain.
422 *
423 * Let M be the maximum scheduler priority.  The mapping must be bijective in
424 * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
425 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
426 * other values the mapping is undefined.
427 *
428 * @param[in] scheduler The scheduler instance.
429 * @param[in] priority The user domain thread priority.
430 *
431 * @return The corresponding thread priority of the scheduler domain is returned.
432 */
433RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
434  const Scheduler_Control *scheduler,
435  Priority_Control         priority
436)
437{
438  return ( *scheduler->Operations.map_priority )( scheduler, priority );
439}
440
441/**
442 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
443 *
444 * @param[in] scheduler The scheduler instance.
445 * @param[in] priority The scheduler domain thread priority.
446 *
447 * @return The corresponding thread priority of the user domain is returned.
448 */
449RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
450  const Scheduler_Control *scheduler,
451  Priority_Control         priority
452)
453{
454  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
455}
456
457/**
458 * @brief Initializes a scheduler node.
459 *
460 * The scheduler node contains arbitrary data on function entry.  The caller
461 * must ensure that _Scheduler_Node_destroy() will be called after a
462 * _Scheduler_Node_initialize() before the memory of the scheduler node is
463 * destroyed.
464 *
465 * @param[in] scheduler The scheduler instance.
466 * @param[in] node The scheduler node to initialize.
467 * @param[in] the_thread The thread of the scheduler node to initialize.
468 * @param[in] priority The thread priority.
469 */
470RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
471  const Scheduler_Control *scheduler,
472  Scheduler_Node          *node,
473  Thread_Control          *the_thread,
474  Priority_Control         priority
475)
476{
477  ( *scheduler->Operations.node_initialize )(
478    scheduler,
479    node,
480    the_thread,
481    priority
482  );
483}
484
485/**
486 * @brief Destroys a scheduler node.
487 *
488 * The caller must ensure that _Scheduler_Node_destroy() will be called only
489 * after a corresponding _Scheduler_Node_initialize().
490 *
491 * @param[in] scheduler The scheduler instance.
492 * @param[in] node The scheduler node to destroy.
493 */
494RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
495  const Scheduler_Control *scheduler,
496  Scheduler_Node          *node
497)
498{
499  ( *scheduler->Operations.node_destroy )( scheduler, node );
500}
501
/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * Clears the priority update set of the thread queue context before the
 * operation so that the scheduler implementation can record the threads
 * affected by the job release.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}
529
/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * Clears the priority update set of the thread queue context before the
 * operation so that the scheduler implementation can record the threads
 * affected by the job cancellation.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}
554
555/**
556 * @brief Scheduler method invoked at each clock tick.
557 *
558 * This method is invoked at each clock tick to allow the scheduler
559 * implementation to perform any activities required.  For the
560 * scheduler which support standard RTEMS features, this includes
561 * time-slicing management.
562 */
563RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
564{
565  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
566  Thread_Control *executing = cpu->executing;
567
568  if ( scheduler != NULL && executing != NULL ) {
569    ( *scheduler->Operations.tick )( scheduler, executing );
570  }
571}
572
573/**
574 * @brief Starts the idle thread for a particular processor.
575 *
576 * @param[in] scheduler The scheduler instance.
577 * @param[in,out] the_thread The idle thread for the processor.
578 * @param[in,out] cpu The processor for the idle thread.
579 *
580 * @see _Thread_Create_idle().
581 */
582RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
583  const Scheduler_Control *scheduler,
584  Thread_Control          *the_thread,
585  Per_CPU_Control         *cpu
586)
587{
588  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
589}
590
591#if defined(RTEMS_SMP)
/**
 * @brief Returns the scheduler assignment of the processor index.
 *
 * @param[in] cpu_index The processor index.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}
598
/**
 * @brief Returns true if the assignment designates a mandatory processor.
 *
 * @param[in] assignment The scheduler assignment of a processor.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}
605
/**
 * @brief Returns true if a scheduler instance is assigned to the processor,
 * i.e. the processor should be started.
 *
 * @param[in] assignment The scheduler assignment of a processor.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
612#endif /* defined(RTEMS_SMP) */
613
/**
 * @brief Returns true if the scheduler instance owns the processor.
 *
 * On uni-processor configurations this is trivially true.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpu_index The processor index.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}
631
632#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
633
/**
 * @brief Fills a CPU set with the processors owned by the scheduler instance.
 *
 * On uni-processor configurations all online processors are set.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpusetsize Size of the CPU set in bytes.
 * @param[out] cpuset The CPU set to fill; cleared first.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
657
/**
 * @brief Default get-affinity operation body.
 *
 * The affinity of the thread is simply the processor set of its scheduler
 * instance; the thread itself is ignored.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread Unused.
 * @param[in] cpusetsize Size of the CPU set in bytes.
 * @param[out] cpuset Receives the processor set of the scheduler instance.
 *
 * @retval true Always successful.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}
671
672bool _Scheduler_Get_affinity(
673  Thread_Control *the_thread,
674  size_t          cpusetsize,
675  cpu_set_t      *cpuset
676);
677
678RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
679  const Scheduler_Control *scheduler,
680  Thread_Control          *the_thread,
681  size_t                   cpusetsize,
682  const cpu_set_t         *cpuset
683)
684{
685  uint32_t cpu_count = _SMP_Get_processor_count();
686  uint32_t cpu_index;
687  bool     ok = true;
688
689  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
690#if defined(RTEMS_SMP)
691    const Scheduler_Control *scheduler_of_cpu =
692      _Scheduler_Get_by_CPU_index( cpu_index );
693
694    ok = ok
695      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
696        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
697          && scheduler != scheduler_of_cpu ) );
698#else
699    (void) scheduler;
700
701    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
702#endif
703  }
704
705  return ok;
706}
707
708bool _Scheduler_Set_affinity(
709  Thread_Control  *the_thread,
710  size_t           cpusetsize,
711  const cpu_set_t *cpuset
712);
713
714#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
715
/**
 * @brief Generic block operation for ready-queue based schedulers.
 *
 * Removes the thread from the ready queue via the extract handler and, in
 * case the thread is currently executing or is the heir, invokes the
 * schedule handler (with its third argument set to true) to select a new
 * heir.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread to block.
 * @param[in] node The scheduler node of the thread.
 * @param[in] extract Handler to remove the thread from the ready queue.
 * @param[in] schedule Handler to perform the scheduling decision.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
740
/**
 * @brief Returns the count of processors owned by the scheduler instance.
 *
 * On uni-processor configurations this is always one.
 *
 * @param[in] scheduler The scheduler instance.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}
753
/**
 * @brief Builds an object identifier for the scheduler index.
 *
 * The object index is biased by one since a zero object index is invalid.
 *
 * @param[in] scheduler_index The zero-based scheduler table index.
 */
RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}
763
764RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
765{
766  uint32_t minimum_id = _Scheduler_Build_id( 0 );
767
768  return id - minimum_id;
769}
770
/**
 * @brief Looks up a scheduler instance by object identifier.
 *
 * @param[in] id The scheduler object identifier.
 * @param[out] scheduler_p Receives the scheduler table entry; note that it
 *   is written even for an invalid identifier and must only be used when
 *   true is returned.
 *
 * @retval true The identifier maps to a scheduler instance which owns at
 *   least one processor.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}
784
785RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
786{
787  const Scheduler_Control *scheduler;
788  bool ok = _Scheduler_Get_by_id( id, &scheduler );
789
790  (void) scheduler;
791
792  return ok;
793}
794
795RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
796  const Scheduler_Control *scheduler
797)
798{
799  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
800}
801
802RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
803  Thread_Control   *the_thread,
804  Priority_Control  new_priority,
805  bool              prepend_it
806)
807{
808  Scheduler_Node *scheduler_node;
809
810  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
811  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
812}
813
814#if defined(RTEMS_SMP)
815/**
816 * @brief Gets an idle thread from the scheduler instance.
817 *
818 * @param[in] context The scheduler instance context.
819 *
820 * @retval idle An idle thread for use.  This function must always return an
821 * idle thread.  If none is available, then this is a fatal error.
822 */
823typedef Thread_Control *( *Scheduler_Get_idle_thread )(
824  Scheduler_Context *context
825);
826
827/**
828 * @brief Releases an idle thread to the scheduler instance for reuse.
829 *
830 * @param[in] context The scheduler instance context.
831 * @param[in] idle The idle thread to release
832 */
833typedef void ( *Scheduler_Release_idle_thread )(
834  Scheduler_Context *context,
835  Thread_Control    *idle
836);
837
838RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
839  Thread_Control *the_thread,
840  Scheduler_Node *node
841)
842{
843  the_thread->Scheduler.node = node;
844}
845
/**
 * @brief Sets the scheduler instance and current node of the thread.
 *
 * The scheduler instance is taken from the previous user of the node, so
 * the thread takes over the node within the same scheduler instance.
 *
 * @param[in] the_thread The thread taking over the node.
 * @param[in] node The scheduler node to use.
 * @param[in] previous_user_of_node The thread previously using the node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}
858
859extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];
860
/**
 * @brief Changes the scheduler state of the thread.
 *
 * In debug configurations the transition is checked against the table of
 * valid state changes.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_state The new scheduler state of the thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );

  the_thread->Scheduler.state = new_state;
}
873
/**
 * @brief Changes the scheduler help state of a thread.
 *
 * The help state lives in the own scheduler node of the thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_help_state The new help state.
 *
 * @return The previous help state.
 */
RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
  Thread_Control       *the_thread,
  Scheduler_Help_state  new_help_state
)
{
  Scheduler_Node *node = _Thread_Scheduler_get_own_node( the_thread );
  Scheduler_Help_state previous_help_state = node->help_state;

  node->help_state = new_help_state;

  return previous_help_state;
}
894
895/**
896 * @brief Changes the resource tree root of a thread.
897 *
898 * For each node of the resource sub-tree specified by the top thread the
899 * scheduler asks for help.  So the root thread gains access to all scheduler
900 * nodes corresponding to the resource sub-tree.  In case a thread previously
901 * granted help is displaced by this operation, then the scheduler asks for
902 * help using its remaining resource tree.
903 *
904 * The run-time of this function depends on the size of the resource sub-tree
905 * and other resource trees in case threads in need for help are produced
906 * during this operation.
907 *
908 * @param[in] top The thread specifying the resource sub-tree top.
909 * @param[in] root The thread specifying the new resource sub-tree root.
910 */
911void _Scheduler_Thread_change_resource_root(
912  Thread_Control *top,
913  Thread_Control *root
914);
915
/**
 * @brief Installs an idle thread as the user of the scheduler node.
 *
 * The node must be in an active helping state, must not already have an
 * idle thread, and its user must currently be its owner.
 *
 * @param[in] node The scheduler node to receive the idle thread.
 * @param[in] idle The idle thread to install.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}
935
/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
 * helping state may use an idle thread for the scheduler node owned by itself
 * in case it executes currently using another scheduler node or in case it is
 * in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The idle thread now installed on the node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );

  return idle;
}
960
/**
 * @brief Action the caller of _Scheduler_Try_to_schedule_node() must take.
 */
typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,      /* Schedule the node */
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE, /* Exchange the idle thread */
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK          /* Block the node */
} Scheduler_Try_to_schedule_action;
966
/**
 * @brief Try to schedule this scheduler node.
 *
 * The decision depends on the help state of the node and the scheduler
 * states of its owner and current user (scheduler helping protocol).
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The action the caller must perform, see
 *   Scheduler_Try_to_schedule_action.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Scheduler_Try_to_schedule_action action;
  Thread_Control *owner;
  Thread_Control *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;

  /* A node not involved in the helping protocol is always schedulable */
  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );
  user = _Scheduler_Node_get_user( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      /* A ready user takes over the node */
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      /* Keep the node scheduled via an idle thread */
      if ( idle != NULL ) {
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      /* Fall back to the owner as the node user */
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      /* Passive nodes never use an idle thread */
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  return action;
}
1031
/**
 * @brief Release an idle thread using this scheduler node.
 *
 * In case an idle thread uses the node, the owner becomes the user again,
 * the idle thread returns to its own node in the ready state, and it is
 * handed back to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as an user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

    ( *release_idle_thread )( context, idle );
  }

  return idle;
}
1063
1064RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
1065  Scheduler_Node *needs_idle,
1066  Scheduler_Node *uses_idle,
1067  Thread_Control *idle
1068)
1069{
1070  uses_idle->idle = NULL;
1071  _Scheduler_Node_set_user(
1072    uses_idle,
1073    _Scheduler_Node_get_owner( uses_idle )
1074  );
1075  _Scheduler_Set_idle_thread( needs_idle, idle );
1076}
1077
/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval true Continue with the blocking operation.
 * @retval false Otherwise, the node stays scheduled (possibly via an idle
 *   thread or another user), so the caller must not block it.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *old_user;
  Thread_Control *new_user;

  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    /* No helping protocol involved: the thread uses its own node, block it. */
    _Assert( thread == _Scheduler_Node_get_user( node ) );

    return true;
  }

  new_user = NULL;

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( is_scheduled ) {
      /*
       * The owner blocks while its node is scheduled: keep the node in place
       * with an idle thread as a stand-in user.
       */
      _Assert( thread == _Scheduler_Node_get_user( node ) );
      old_user = thread;
      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( is_scheduled ) {
      old_user = _Scheduler_Node_get_user( node );

      if ( thread == old_user ) {
        Thread_Control *owner = _Scheduler_Node_get_owner( node );

        if (
          thread != owner
            && owner->Scheduler.state == THREAD_SCHEDULER_READY
        ) {
          /* Hand the scheduled node over to its ready owner. */
          new_user = owner;
          _Scheduler_Node_set_user( node, new_user );
        } else {
          /* No ready owner available: fall back to an idle stand-in. */
          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
        }
      }
    }
  } else {
    /* Not implemented, this is part of the OMIP support path. */
    _Assert(0);
  }

  if ( new_user != NULL ) {
    /* old_user is always set on the paths which produced a new user. */
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
  }

  return false;
}
1152
/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false The node is already scheduled (the heir was updated here if
 *   necessary), so the caller must not unblock it again.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  if ( is_scheduled ) {
    /*
     * The node stayed scheduled while the thread was blocked, e.g. via an
     * idle stand-in or another user.  Release a potential idle thread first,
     * then decide who becomes the new user.
     */
    Thread_Control *old_user = _Scheduler_Node_get_user( node );
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );
    Thread_Control *owner = _Scheduler_Node_get_owner( node );
    Thread_Control *new_user;

    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
      /* The owner unblocks; an idle stand-in must have been in place. */
      _Assert( idle != NULL );
      new_user = the_thread;
    } else if ( idle != NULL ) {
      /* A rival unblocks while an idle stand-in was the user. */
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      new_user = the_thread;
    } else if ( the_thread != owner ) {
      /* A rival unblocks while the owner was the user: displace the owner. */
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
      new_user = the_thread;
      _Scheduler_Node_set_user( node, new_user );
    } else {
      /* The owner unblocks while a rival keeps using the node. */
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
      new_user = NULL;
    }

    if ( new_user != NULL ) {
      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Set_CPU( new_user, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
    }

    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );

    unblock = true;
  }

  return unblock;
}
1220
1221/**
1222 * @brief Asks a ready scheduler node for help.
1223 *
1224 * @param[in] node The ready node offering help.
1225 * @param[in] needs_help The thread needing help.
1226 *
1227 * @retval needs_help The thread needing help.
1228 */
1229RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
1230  Scheduler_Node *node,
1231  Thread_Control *needs_help
1232)
1233{
1234  _Scheduler_Node_set_user( node, needs_help );
1235
1236  return needs_help;
1237}
1238
/**
 * @brief Asks a scheduled scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 * @param[in] previous_accepts_help The previous thread accepting help by this
 *   scheduler node.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval needs_help The previous thread accepting help by this scheduler node
 *   which was displaced by the thread needing help.
 * @retval NULL There are no more threads needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Thread_Control                *offers_help,
  Thread_Control                *needs_help,
  Thread_Control                *previous_accepts_help,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *next_needs_help = NULL;
  Thread_Control *old_user = NULL;
  Thread_Control *new_user = NULL;

  if (
    previous_accepts_help != needs_help
      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
  ) {
    /*
     * A different thread currently accepts help by this node: release a
     * potential idle stand-in and determine who gets displaced.
     */
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
      old_user = previous_accepts_help;
    }

    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
      new_user = needs_help;
    } else {
      /* The thread needing help is already scheduled elsewhere: fall back to
       * the thread offering help, which must be on its own node. */
      _Assert(
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );

      new_user = offers_help;
    }

    if ( previous_accepts_help != offers_help ) {
      /* The displaced thread may itself need help next. */
      next_needs_help = previous_accepts_help;
    }
  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      old_user = _Scheduler_Node_get_user( node );
    }

    new_user = needs_help;
  } else {
    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
  }

  if ( new_user != old_user ) {
    /* Swap the node user and make the new user the heir on the old user's
     * processor. */
    Per_CPU_Control *cpu_self = _Per_CPU_Get();
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_scheduler_and_node(
      old_user,
      _Thread_Scheduler_get_own_node( old_user ),
      old_user
    );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );

    _Scheduler_Node_set_user( node, new_user );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
  }

  return next_needs_help;
}
1338
1339/**
1340 * @brief Asks a blocked scheduler node for help.
1341 *
1342 * @param[in] context The scheduler instance context.
1343 * @param[in] node The scheduled node offering help.
1344 * @param[in] offers_help The thread offering help.
1345 * @param[in] needs_help The thread needing help.
1346 *
1347 * @retval true Enqueue this scheduler node.
1348 * @retval false Otherwise.
1349 */
1350RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
1351  Scheduler_Context *context,
1352  Scheduler_Node    *node,
1353  Thread_Control    *offers_help,
1354  Thread_Control    *needs_help
1355)
1356{
1357  bool enqueue;
1358
1359  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
1360
1361  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1362    _Scheduler_Node_set_user( node, needs_help );
1363    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );
1364
1365    enqueue = true;
1366  } else {
1367    enqueue = false;
1368  }
1369
1370  return enqueue;
1371}
1372#endif
1373
1374RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
1375  Thread_Control *new_heir,
1376  bool            force_dispatch
1377)
1378{
1379  Thread_Control *heir = _Thread_Heir;
1380
1381  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
1382#if defined(RTEMS_SMP)
1383    /*
1384     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
1385     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
1386     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
1387     * schedulers.
1388     */
1389    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
1390    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1391#endif
1392    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
1393    _Thread_Heir = new_heir;
1394    _Thread_Dispatch_necessary = true;
1395  }
1396}
1397
/**
 * @brief Sets a new scheduler for the thread and assigns the new priority.
 *
 * @param[in] new_scheduler The scheduler instance to assign to the thread.
 * @param[in] the_thread The thread to move to the new scheduler.
 * @param[in] priority The new real priority of the thread.
 *
 * @retval STATUS_SUCCESSFUL The scheduler and priority were changed.
 * @retval STATUS_RESOURCE_IN_USE The thread owns resources, waits on a
 *   thread queue, or has additional priority contributions (e.g. via
 *   priority inheritance), so a scheduler change is not allowed.
 */
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;

  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    /*
     * Other priority nodes contribute to this scheduler node: undo the
     * extraction and refuse the scheduler change.
     */
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  /* Only one scheduler instance exists without SMP. */
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  /* Move the thread's single wait node to the new scheduler node. */
  _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Wait_nodes,
    &new_scheduler_node->Thread.Wait_node
  );

  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      /* Block on the old scheduler, switch instances, then unblock on the
       * new one. */
      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      the_thread->Scheduler.own_node = new_scheduler_node;
      the_thread->Scheduler.node = new_scheduler_node;
      _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  /* Same scheduler instance: just apply the new priority. */
  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
1486
1487/** @} */
1488
1489#ifdef __cplusplus
1490}
1491#endif
1492
1493#endif
1494/* end of include file */
Note: See TracBrowser for help on using the repository browser.