source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ d097b546

5
Last change on this file since d097b546 was d097b546, checked in by Sebastian Huber <sebastian.huber@…>, on 09/21/16 at 13:17:37

score: Rename scheduler ask for help stuff

Rename the scheduler ask for help stuff since this will be replaced step
by step with a second generation of the scheduler helping protocol.
Keep the old one for now in parallel to reduce the patch set sizes.

Update #2556.

  • Property mode set to 100644
File size: 40.4 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
5 *
6 * This inline file contains all of the inlined routines associated with
7 * the manipulation of the scheduler.
8 */
9
10/*
11 *  Copyright (C) 2010 Gedare Bloom.
12 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
13 *  Copyright (c) 2014, 2016 embedded brains GmbH
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.org/license/LICENSE.
18 */
19
20#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
21#define _RTEMS_SCORE_SCHEDULERIMPL_H
22
23#include <rtems/score/scheduler.h>
24#include <rtems/score/cpusetimpl.h>
25#include <rtems/score/priorityimpl.h>
26#include <rtems/score/smpimpl.h>
27#include <rtems/score/status.h>
28#include <rtems/score/threadimpl.h>
29
30#ifdef __cplusplus
31extern "C" {
32#endif
33
34/**
35 * @addtogroup ScoreScheduler
36 */
37/**@{**/
38
39/**
40 *  @brief Initializes the scheduler to the policy chosen by the user.
41 *
42 *  This routine initializes the scheduler to the policy chosen by the user
43 *  through confdefs, or to the priority scheduler with ready chains by
44 *  default.
45 */
46void _Scheduler_Handler_initialization( void );
47
48RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
49  const Scheduler_Control *scheduler
50)
51{
52  return scheduler->context;
53}
54
/**
 * @brief Returns the scheduler instance of the thread.
 *
 * On SMP configurations this is the current scheduler instance of the thread.
 * On uni-processor configurations there is exactly one scheduler instance.
 *
 * @param[in] the_thread The thread of interest.
 *
 * @return The scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
67
/**
 * @brief Returns the own scheduler instance of the thread.
 *
 * On SMP configurations this is the scheduler instance the thread belongs to
 * independent of any help it currently receives from other instances.  On
 * uni-processor configurations there is exactly one scheduler instance.
 *
 * @param[in] the_thread The thread of interest.
 *
 * @return The own scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
80
/**
 * @brief Returns the scheduler instance assigned to the processor index.
 *
 * @param[in] cpu_index The processor index.
 *
 * @return The scheduler instance assigned to this processor index; may be
 *   NULL on SMP configurations in case no scheduler is assigned.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}
93
94RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
95  const Per_CPU_Control *cpu
96)
97{
98  uint32_t cpu_index = _Per_CPU_Get_index( cpu );
99
100  return _Scheduler_Get_by_CPU_index( cpu_index );
101}
102
103ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
104
/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  /* There is currently one global lock shared by all scheduler instances. */
  (void) scheduler;
  _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
}
121
/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  /* There is currently one global lock shared by all scheduler instances. */
  (void) scheduler;
  _ISR_lock_Release( &_Scheduler_Lock, lock_context );
}
138
139/**
140 * The preferred method to add a new scheduler is to define the jump table
141 * entries and add a case to the _Scheduler_Initialize routine.
142 *
143 * Generic scheduling implementations that rely on the ready queue only can
144 * be found in the _Scheduler_queue_XXX functions.
145 */
146
147/*
148 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
150 * system.  Then remote Schedulers may be accessible.  How to protect such
151 * accesses remains an open problem.
152 */
153
/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread which state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* Delegate the scheduling decision to the scheduler implementation */
  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
174
175#if defined(RTEMS_SMP)
176typedef struct {
177  Thread_Control *needs_help;
178  Thread_Control *next_needs_help;
179} Scheduler_Ask_for_help_context ;
180
181RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
182  Resource_Node *resource_node,
183  void          *arg
184)
185{
186  bool done;
187  Scheduler_Ask_for_help_context *help_context = arg;
188  Thread_Control *previous_needs_help = help_context->needs_help;
189  Thread_Control *next_needs_help;
190  Thread_Control *offers_help =
191    THREAD_RESOURCE_NODE_TO_THREAD( resource_node );
192  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );
193
194  next_needs_help = ( *scheduler->Operations.ask_for_help_X )(
195    scheduler,
196    offers_help,
197    previous_needs_help
198  );
199
200  done = next_needs_help != previous_needs_help;
201
202  if ( done ) {
203    help_context->next_needs_help = next_needs_help;
204  }
205
206  return done;
207}
208
/**
 * @brief Ask threads depending on resources owned by the thread for help.
 *
 * A thread is in need for help if it lost its assigned processor due to
 * pre-emption by a higher priority thread or it was not possible to assign it
 * a processor since its priority is too low on its current scheduler instance.
 *
 * The run-time of this function depends on the size of the resource tree of
 * the thread needing help and other resource trees in case threads in need for
 * help are produced during this operation.
 *
 * @param[in] needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_X(
  Thread_Control *needs_help
)
{
  do {
    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );

    /* First ask the own scheduler instance of the thread for help */
    needs_help = ( *scheduler->Operations.ask_for_help_X )(
      scheduler,
      needs_help,
      needs_help
    );

    /*
     * In case the own instance could not help, iterate the resource tree of
     * the thread to find a thread which can offer help.
     */
    if ( needs_help != NULL ) {
      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };

      _Resource_Iterate(
        &needs_help->Resource_node,
        _Scheduler_Ask_for_help_visitor,
        &help_context
      );

      needs_help = help_context.next_needs_help;
    }
  } while ( needs_help != NULL );
}
248
/**
 * @brief Asks for help if the thread needs it and owns resources.
 *
 * The ask for help operation is only carried out in case the thread owns
 * resources and is not already the sole active user of its own scheduler
 * node.
 *
 * @param[in] needs_help The thread needing help, or NULL if no help is
 *   needed.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  Thread_Control *needs_help
)
{
  if (
    needs_help != NULL
      && _Resource_Node_owns_resources( &needs_help->Resource_node )
  ) {
    Scheduler_Node *node = _Thread_Scheduler_get_own_node( needs_help );

    if (
      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
        || _Scheduler_Node_get_user( node ) != needs_help
    ) {
      _Scheduler_Ask_for_help_X( needs_help );
    }
  }
}
267#endif
268
/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /*
   * On SMP configurations the yield operation returns a thread in need for
   * help; the preprocessor split lets both configurations share the call.
   */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.yield )( scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
299
/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.block )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
322
/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* On SMP configurations the unblock operation may yield a thread in need
     for help; the preprocessor split lets both configurations share the
     call. */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.unblock )( scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
355
/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
  const Scheduler_Control *own_scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  /* The priority update is carried out by the own scheduler instance */
  own_scheduler = _Scheduler_Get_own( the_thread );
  _Scheduler_Acquire_critical( own_scheduler, &lock_context );

#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *own_scheduler->Operations.update_priority )( own_scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( own_scheduler, &lock_context );
}
392
393/**
394 * @brief Maps a thread priority from the user domain to the scheduler domain.
395 *
396 * Let M be the maximum scheduler priority.  The mapping must be bijective in
397 * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
398 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
399 * other values the mapping is undefined.
400 *
401 * @param[in] scheduler The scheduler instance.
402 * @param[in] priority The user domain thread priority.
403 *
404 * @return The corresponding thread priority of the scheduler domain is returned.
405 */
406RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
407  const Scheduler_Control *scheduler,
408  Priority_Control         priority
409)
410{
411  return ( *scheduler->Operations.map_priority )( scheduler, priority );
412}
413
414/**
415 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
416 *
417 * @param[in] scheduler The scheduler instance.
418 * @param[in] priority The scheduler domain thread priority.
419 *
420 * @return The corresponding thread priority of the user domain is returned.
421 */
422RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
423  const Scheduler_Control *scheduler,
424  Priority_Control         priority
425)
426{
427  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
428}
429
430/**
431 * @brief Initializes a scheduler node.
432 *
433 * The scheduler node contains arbitrary data on function entry.  The caller
434 * must ensure that _Scheduler_Node_destroy() will be called after a
435 * _Scheduler_Node_initialize() before the memory of the scheduler node is
436 * destroyed.
437 *
438 * @param[in] scheduler The scheduler instance.
439 * @param[in] node The scheduler node to initialize.
440 * @param[in] the_thread The thread of the scheduler node to initialize.
441 * @param[in] priority The thread priority.
442 */
443RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
444  const Scheduler_Control *scheduler,
445  Scheduler_Node          *node,
446  Thread_Control          *the_thread,
447  Priority_Control         priority
448)
449{
450  ( *scheduler->Operations.node_initialize )(
451    scheduler,
452    node,
453    the_thread,
454    priority
455  );
456}
457
458/**
459 * @brief Destroys a scheduler node.
460 *
461 * The caller must ensure that _Scheduler_Node_destroy() will be called only
462 * after a corresponding _Scheduler_Node_initialize().
463 *
464 * @param[in] scheduler The scheduler instance.
465 * @param[in] node The scheduler node to destroy.
466 */
467RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
468  const Scheduler_Control *scheduler,
469  Scheduler_Node          *node
470)
471{
472  ( *scheduler->Operations.node_destroy )( scheduler, node );
473}
474
475/**
476 * @brief Releases a job of a thread with respect to the scheduler.
477 *
478 * @param[in] the_thread The thread.
479 * @param[in] priority_node The priority node of the job.
480 * @param[in] deadline The deadline in watchdog ticks since boot.
481 * @param[in] queue_context The thread queue context to provide the set of
482 *   threads for _Thread_Priority_update().
483 */
484RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
485  Thread_Control       *the_thread,
486  Priority_Node        *priority_node,
487  uint64_t              deadline,
488  Thread_queue_Context *queue_context
489)
490{
491  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
492
493  _Thread_queue_Context_clear_priority_updates( queue_context );
494  ( *scheduler->Operations.release_job )(
495    scheduler,
496    the_thread,
497    priority_node,
498    deadline,
499    queue_context
500  );
501}
502
503/**
504 * @brief Cancels a job of a thread with respect to the scheduler.
505 *
506 * @param[in] the_thread The thread.
507 * @param[in] priority_node The priority node of the job.
508 * @param[in] queue_context The thread queue context to provide the set of
509 *   threads for _Thread_Priority_update().
510 */
511RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
512  Thread_Control       *the_thread,
513  Priority_Node        *priority_node,
514  Thread_queue_Context *queue_context
515)
516{
517  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
518
519  _Thread_queue_Context_clear_priority_updates( queue_context );
520  ( *scheduler->Operations.cancel_job )(
521    scheduler,
522    the_thread,
523    priority_node,
524    queue_context
525  );
526}
527
528/**
529 * @brief Scheduler method invoked at each clock tick.
530 *
531 * This method is invoked at each clock tick to allow the scheduler
532 * implementation to perform any activities required.  For the
533 * scheduler which support standard RTEMS features, this includes
534 * time-slicing management.
535 */
536RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
537{
538  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
539  Thread_Control *executing = cpu->executing;
540
541  if ( scheduler != NULL && executing != NULL ) {
542    ( *scheduler->Operations.tick )( scheduler, executing );
543  }
544}
545
546/**
547 * @brief Starts the idle thread for a particular processor.
548 *
549 * @param[in] scheduler The scheduler instance.
550 * @param[in,out] the_thread The idle thread for the processor.
551 * @param[in,out] cpu The processor for the idle thread.
552 *
553 * @see _Thread_Create_idle().
554 */
555RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
556  const Scheduler_Control *scheduler,
557  Thread_Control          *the_thread,
558  Per_CPU_Control         *cpu
559)
560{
561  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
562}
563
564#if defined(RTEMS_SMP)
/**
 * @brief Returns the processor to scheduler assignment of the processor index.
 *
 * @param[in] cpu_index The processor index.
 *
 * @return The scheduler assignment of this processor index.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

/**
 * @brief Returns true if the processor assignment is mandatory, and false
 * otherwise.
 *
 * @param[in] assignment The scheduler assignment of a processor.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

/**
 * @brief Returns true if the processor of this assignment should be started,
 * and false otherwise.
 *
 * A processor should be started in case a scheduler instance is assigned to
 * it.
 *
 * @param[in] assignment The scheduler assignment of a processor.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
585#endif /* defined(RTEMS_SMP) */
586
/**
 * @brief Returns true if the scheduler instance owns the processor specified
 * by the processor index, and false otherwise.
 *
 * On uni-processor configurations this is always true.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpu_index The processor index.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}
604
605#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
606
/**
 * @brief Fills the processor set with the processors owned by the scheduler
 * instance.
 *
 * On uni-processor configurations all processors are set.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpusetsize Size of the processor set in bytes.
 * @param[out] cpuset The processor set to fill.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
630
/**
 * @brief Default scheduler get affinity operation body.
 *
 * The affinity of a thread is the processor set of its scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread (unused by the default implementation).
 * @param[in] cpusetsize Size of the processor set in bytes.
 * @param[out] cpuset The processor affinity set of the thread.
 *
 * @retval true This operation always succeeds.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}
644
645bool _Scheduler_Get_affinity(
646  Thread_Control *the_thread,
647  size_t          cpusetsize,
648  cpu_set_t      *cpuset
649);
650
651RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
652  const Scheduler_Control *scheduler,
653  Thread_Control          *the_thread,
654  size_t                   cpusetsize,
655  const cpu_set_t         *cpuset
656)
657{
658  uint32_t cpu_count = _SMP_Get_processor_count();
659  uint32_t cpu_index;
660  bool     ok = true;
661
662  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
663#if defined(RTEMS_SMP)
664    const Scheduler_Control *scheduler_of_cpu =
665      _Scheduler_Get_by_CPU_index( cpu_index );
666
667    ok = ok
668      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
669        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
670          && scheduler != scheduler_of_cpu ) );
671#else
672    (void) scheduler;
673
674    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
675#endif
676  }
677
678  return ok;
679}
680
681bool _Scheduler_Set_affinity(
682  Thread_Control  *the_thread,
683  size_t           cpusetsize,
684  const cpu_set_t *cpuset
685);
686
687#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
688
/**
 * @brief Generic scheduler block implementation.
 *
 * Extracts the thread via the provided extract operation and, in case the
 * thread is the executing or heir thread, performs a scheduling decision via
 * the provided schedule operation.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread to block.
 * @param[in] extract Scheduler specific extract operation.
 * @param[in] schedule Scheduler specific schedule operation; the boolean
 *   argument is true to force the selection of a new heir.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control * ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool )
)
{
  ( *extract )( scheduler, the_thread );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
709
/**
 * @brief Returns the count of processors owned by the scheduler instance.
 *
 * On uni-processor configurations this is always one.
 *
 * @param[in] scheduler The scheduler instance.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}
722
/**
 * @brief Builds an object identifier for the scheduler index.
 *
 * @param[in] scheduler_index The scheduler index.
 *
 * @return The object identifier corresponding to the scheduler index.
 */
RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    /* Offset by one so that scheduler index zero maps to a non-zero object
       index */
    (uint16_t) ( scheduler_index + 1 )
  );
}
732
733RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
734{
735  uint32_t minimum_id = _Scheduler_Build_id( 0 );
736
737  return id - minimum_id;
738}
739
/**
 * @brief Gets the scheduler instance of the object identifier.
 *
 * @param[in] id The object identifier.
 * @param[out] scheduler_p Receives a pointer into the scheduler table; it is
 *   written even in the error case and must not be used then.
 *
 * @retval true The identifier is valid and the scheduler instance owns at
 *   least one processor.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}
753
754RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
755{
756  const Scheduler_Control *scheduler;
757  bool ok = _Scheduler_Get_by_id( id, &scheduler );
758
759  (void) scheduler;
760
761  return ok;
762}
763
764RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
765  const Scheduler_Control *scheduler
766)
767{
768  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
769}
770
/**
 * @brief Returns the scheduler node of the thread.
 *
 * On SMP configurations this is the current scheduler node; on uni-processor
 * configurations the thread has exactly one scheduler node.
 *
 * @param[in] the_thread The thread of interest.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.node;
#else
  return the_thread->Scheduler.nodes;
#endif
}
781
782RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
783  Thread_Control   *the_thread,
784  Priority_Control  new_priority,
785  bool              prepend_it
786)
787{
788  Scheduler_Node *scheduler_node;
789
790  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
791  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
792}
793
794#if defined(RTEMS_SMP)
795/**
796 * @brief Gets an idle thread from the scheduler instance.
797 *
798 * @param[in] context The scheduler instance context.
799 *
800 * @retval idle An idle thread for use.  This function must always return an
801 * idle thread.  If none is available, then this is a fatal error.
802 */
803typedef Thread_Control *( *Scheduler_Get_idle_thread )(
804  Scheduler_Context *context
805);
806
807/**
808 * @brief Releases an idle thread to the scheduler instance for reuse.
809 *
810 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
812 */
813typedef void ( *Scheduler_Release_idle_thread )(
814  Scheduler_Context *context,
815  Thread_Control    *idle
816);
817
/**
 * @brief Sets the current scheduler node of the thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] node The new current scheduler node of the thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  the_thread->Scheduler.node = node;
}
825
/**
 * @brief Sets the current scheduler instance and node of the thread.
 *
 * The scheduler instance is taken from the previous user of the node.
 *
 * @param[in] the_thread The thread.
 * @param[in] node The new current scheduler node of the thread.
 * @param[in] previous_user_of_node The previous user of the scheduler node;
 *   its own scheduler instance becomes the scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}
838
839extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];
840
/**
 * @brief Changes the scheduler state of the thread.
 *
 * In debug configurations the state transition is validated against the
 * _Scheduler_Thread_state_valid_state_changes table.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_state The new scheduler state of the thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );

  the_thread->Scheduler.state = new_state;
}
853
/**
 * @brief Changes the scheduler help state of a thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_help_state The new help state.
 *
 * @return The previous help state.
 */
RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
  Thread_Control       *the_thread,
  Scheduler_Help_state  new_help_state
)
{
  /* The help state is maintained in the own scheduler node of the thread */
  Scheduler_Node *node = _Thread_Scheduler_get_own_node( the_thread );
  Scheduler_Help_state previous_help_state = node->help_state;

  node->help_state = new_help_state;

  return previous_help_state;
}
874
875/**
876 * @brief Changes the resource tree root of a thread.
877 *
878 * For each node of the resource sub-tree specified by the top thread the
879 * scheduler asks for help.  So the root thread gains access to all scheduler
880 * nodes corresponding to the resource sub-tree.  In case a thread previously
881 * granted help is displaced by this operation, then the scheduler asks for
882 * help using its remaining resource tree.
883 *
884 * The run-time of this function depends on the size of the resource sub-tree
885 * and other resource trees in case threads in need for help are produced
886 * during this operation.
887 *
888 * @param[in] top The thread specifying the resource sub-tree top.
889 * @param[in] root The thread specifying the new resource sub-tree root.
890 */
891void _Scheduler_Thread_change_resource_root(
892  Thread_Control *top,
893  Thread_Control *root
894);
895
/**
 * @brief Sets the idle thread as the user of the scheduler node.
 *
 * The node must be in the SCHEDULER_HELP_ACTIVE_OWNER or
 * SCHEDULER_HELP_ACTIVE_RIVAL help state, must not already have an idle
 * thread, and its owner must currently be its user.
 *
 * @param[in] node The scheduler node which gets the idle thread.
 * @param[in] idle The idle thread to use.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}
915
/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
 * helping state may use an idle thread for the scheduler node owned by itself
 * in case it executes currently using another scheduler node or in case it is
 * in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The idle thread now used by the node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );

  return idle;
}
940
941typedef enum {
942  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
943  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
944  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
945} Scheduler_Try_to_schedule_action;
946
/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval true This node can be scheduled.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Scheduler_Try_to_schedule_action action;
  Thread_Control *owner;
  Thread_Control *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;

  /* Nodes outside the helping protocol can always be scheduled */
  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );
  user = _Scheduler_Node_get_user( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      /* A ready user takes over scheduler instance and node */
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      /* A blocked owner is represented by an idle thread */
      if ( idle != NULL ) {
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      /* Fall back to the owner as the user of the node */
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    /* Passive nodes are only scheduled with a ready user */
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  return action;
}
1011
/**
 * @brief Release an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as an user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    /* Restore the owner as the user and detach the idle thread */
    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

    ( *release_idle_thread )( context, idle );
  }

  return idle;
}
1043
1044RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
1045  Scheduler_Node *needs_idle,
1046  Scheduler_Node *uses_idle,
1047  Thread_Control *idle
1048)
1049{
1050  uses_idle->idle = NULL;
1051  _Scheduler_Node_set_user(
1052    uses_idle,
1053    _Scheduler_Node_get_owner( uses_idle )
1054  );
1055  _Scheduler_Set_idle_thread( needs_idle, idle );
1056}
1057
1058/**
1059 * @brief Block this scheduler node.
1060 *
1061 * @param[in] context The scheduler instance context.
1062 * @param[in] thread The thread which wants to get blocked referencing this
1063 *   node.  This is not necessarily the user of this node in case the node
1064 *   participates in the scheduler helping protocol.
1065 * @param[in] node The node which wants to get blocked.
1066 * @param[in] is_scheduled This node is scheduled.
1067 * @param[in] get_idle_thread Function to get an idle thread.
1068 *
1069 * @retval true Continue with the blocking operation.
1070 * @retval false Otherwise.
1071 */
1072RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
1073  Scheduler_Context         *context,
1074  Thread_Control            *thread,
1075  Scheduler_Node            *node,
1076  bool                       is_scheduled,
1077  Scheduler_Get_idle_thread  get_idle_thread
1078)
1079{
1080  Thread_Control *old_user;
1081  Thread_Control *new_user;
1082
1083  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
1084
1085  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
1086    _Assert( thread == _Scheduler_Node_get_user( node ) );
1087
1088    return true;
1089  }
1090
1091  new_user = NULL;
1092
1093  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
1094    if ( is_scheduled ) {
1095      _Assert( thread == _Scheduler_Node_get_user( node ) );
1096      old_user = thread;
1097      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
1098    }
1099  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
1100    if ( is_scheduled ) {
1101      old_user = _Scheduler_Node_get_user( node );
1102
1103      if ( thread == old_user ) {
1104        Thread_Control *owner = _Scheduler_Node_get_owner( node );
1105
1106        if (
1107          thread != owner
1108            && owner->Scheduler.state == THREAD_SCHEDULER_READY
1109        ) {
1110          new_user = owner;
1111          _Scheduler_Node_set_user( node, new_user );
1112        } else {
1113          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
1114        }
1115      }
1116    }
1117  } else {
1118    /* Not implemented, this is part of the OMIP support path. */
1119    _Assert(0);
1120  }
1121
1122  if ( new_user != NULL ) {
1123    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
1124
1125    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
1126    _Thread_Set_CPU( new_user, cpu );
1127    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
1128  }
1129
1130  return false;
1131}
1132
1133/**
1134 * @brief Unblock this scheduler node.
1135 *
1136 * @param[in] context The scheduler instance context.
1137 * @param[in] the_thread The thread which wants to get unblocked.
1138 * @param[in] node The node which wants to get unblocked.
1139 * @param[in] is_scheduled This node is scheduled.
1140 * @param[in] release_idle_thread Function to release an idle thread.
1141 *
1142 * @retval true Continue with the unblocking operation.
1143 * @retval false Otherwise.
1144 */
1145RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
1146  Scheduler_Context             *context,
1147  Thread_Control                *the_thread,
1148  Scheduler_Node                *node,
1149  bool                           is_scheduled,
1150  Scheduler_Release_idle_thread  release_idle_thread
1151)
1152{
1153  bool unblock;
1154
1155  if ( is_scheduled ) {
1156    Thread_Control *old_user = _Scheduler_Node_get_user( node );
1157    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
1158    Thread_Control *idle = _Scheduler_Release_idle_thread(
1159      context,
1160      node,
1161      release_idle_thread
1162    );
1163    Thread_Control *owner = _Scheduler_Node_get_owner( node );
1164    Thread_Control *new_user;
1165
1166    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
1167      _Assert( idle != NULL );
1168      new_user = the_thread;
1169    } else if ( idle != NULL ) {
1170      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
1171      new_user = the_thread;
1172    } else if ( the_thread != owner ) {
1173      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
1174      _Assert( old_user != the_thread );
1175      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
1176      new_user = the_thread;
1177      _Scheduler_Node_set_user( node, new_user );
1178    } else {
1179      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
1180      _Assert( old_user != the_thread );
1181      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
1182      new_user = NULL;
1183    }
1184
1185    if ( new_user != NULL ) {
1186      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
1187      _Thread_Set_CPU( new_user, cpu );
1188      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
1189    }
1190
1191    unblock = false;
1192  } else {
1193    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
1194
1195    unblock = true;
1196  }
1197
1198  return unblock;
1199}
1200
1201/**
1202 * @brief Asks a ready scheduler node for help.
1203 *
1204 * @param[in] node The ready node offering help.
1205 * @param[in] needs_help The thread needing help.
1206 *
1207 * @retval needs_help The thread needing help.
1208 */
1209RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
1210  Scheduler_Node *node,
1211  Thread_Control *needs_help
1212)
1213{
1214  _Scheduler_Node_set_user( node, needs_help );
1215
1216  return needs_help;
1217}
1218
1219/**
1220 * @brief Asks a scheduled scheduler node for help.
1221 *
1222 * @param[in] context The scheduler instance context.
1223 * @param[in] node The scheduled node offering help.
1224 * @param[in] offers_help The thread offering help.
1225 * @param[in] needs_help The thread needing help.
1226 * @param[in] previous_accepts_help The previous thread accepting help by this
1227 *   scheduler node.
1228 * @param[in] release_idle_thread Function to release an idle thread.
1229 *
1230 * @retval needs_help The previous thread accepting help by this scheduler node
1231 *   which was displaced by the thread needing help.
1232 * @retval NULL There are no more threads needing help.
1233 */
1234RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
1235  Scheduler_Context             *context,
1236  Scheduler_Node                *node,
1237  Thread_Control                *offers_help,
1238  Thread_Control                *needs_help,
1239  Thread_Control                *previous_accepts_help,
1240  Scheduler_Release_idle_thread  release_idle_thread
1241)
1242{
1243  Thread_Control *next_needs_help = NULL;
1244  Thread_Control *old_user = NULL;
1245  Thread_Control *new_user = NULL;
1246
1247  if (
1248    previous_accepts_help != needs_help
1249      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
1250  ) {
1251    Thread_Control *idle = _Scheduler_Release_idle_thread(
1252      context,
1253      node,
1254      release_idle_thread
1255    );
1256
1257    if ( idle != NULL ) {
1258      old_user = idle;
1259    } else {
1260      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
1261      old_user = previous_accepts_help;
1262    }
1263
1264    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1265      new_user = needs_help;
1266    } else {
1267      _Assert(
1268        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
1269          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
1270      );
1271      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );
1272
1273      new_user = offers_help;
1274    }
1275
1276    if ( previous_accepts_help != offers_help ) {
1277      next_needs_help = previous_accepts_help;
1278    }
1279  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1280    Thread_Control *idle = _Scheduler_Release_idle_thread(
1281      context,
1282      node,
1283      release_idle_thread
1284    );
1285
1286    if ( idle != NULL ) {
1287      old_user = idle;
1288    } else {
1289      old_user = _Scheduler_Node_get_user( node );
1290    }
1291
1292    new_user = needs_help;
1293  } else {
1294    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
1295  }
1296
1297  if ( new_user != old_user ) {
1298    Per_CPU_Control *cpu_self = _Per_CPU_Get();
1299    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
1300
1301    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
1302    _Scheduler_Thread_set_scheduler_and_node(
1303      old_user,
1304      _Thread_Scheduler_get_own_node( old_user ),
1305      old_user
1306    );
1307
1308    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
1309    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );
1310
1311    _Scheduler_Node_set_user( node, new_user );
1312    _Thread_Set_CPU( new_user, cpu );
1313    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
1314  }
1315
1316  return next_needs_help;
1317}
1318
1319/**
1320 * @brief Asks a blocked scheduler node for help.
1321 *
1322 * @param[in] context The scheduler instance context.
1323 * @param[in] node The scheduled node offering help.
1324 * @param[in] offers_help The thread offering help.
1325 * @param[in] needs_help The thread needing help.
1326 *
1327 * @retval true Enqueue this scheduler node.
1328 * @retval false Otherwise.
1329 */
1330RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
1331  Scheduler_Context *context,
1332  Scheduler_Node    *node,
1333  Thread_Control    *offers_help,
1334  Thread_Control    *needs_help
1335)
1336{
1337  bool enqueue;
1338
1339  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
1340
1341  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1342    _Scheduler_Node_set_user( node, needs_help );
1343    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );
1344
1345    enqueue = true;
1346  } else {
1347    enqueue = false;
1348  }
1349
1350  return enqueue;
1351}
1352#endif
1353
1354RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
1355  Thread_Control *new_heir,
1356  bool            force_dispatch
1357)
1358{
1359  Thread_Control *heir = _Thread_Heir;
1360
1361  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
1362#if defined(RTEMS_SMP)
1363    /*
1364     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
1365     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
1366     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
1367     * schedulers.
1368     */
1369    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
1370    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1371#endif
1372    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
1373    _Thread_Heir = new_heir;
1374    _Thread_Dispatch_necessary = true;
1375  }
1376}
1377
/**
 * @brief Sets a new scheduler and priority for a thread.
 *
 * The thread must not own resources and must not wait on a thread queue,
 * otherwise the scheduler change is refused.
 *
 * @param[in] new_scheduler The scheduler instance to use for the thread.
 * @param[in] the_thread The thread to move to the new scheduler.
 * @param[in] priority The new real priority of the thread.
 *
 * @retval STATUS_SUCCESSFUL The scheduler and priority were changed.
 * @retval STATUS_RESOURCE_IN_USE The thread owns resources, waits on a
 *   thread queue, or has additional priority nodes contributed by others.
 */
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;

  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  /*
   * A non-empty priority aggregation after removing the real priority means
   * other priority nodes (e.g. inherited ones) are still attached; undo the
   * extraction and refuse the scheduler change.
   */
  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  /* Uniprocessor configurations have exactly one scheduler node. */
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  /* Make the new scheduler node the only wait node of the thread. */
  _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Wait_nodes,
    &new_scheduler_node->Thread.Wait_node
  );

  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      /*
       * Block the thread on the old scheduler instance before switching,
       * then unblock it on the new one if it was ready.
       */
      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      the_thread->Scheduler.own_node = new_scheduler_node;
      the_thread->Scheduler.node = new_scheduler_node;
      _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  /* Same scheduler instance: only the priority changes. */
  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
1466
1467/** @} */
1468
1469#ifdef __cplusplus
1470}
1471#endif
1472
1473#endif
1474/* end of include file */