source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ c0bd006

5
Last change on this file since c0bd006 was c0bd006, checked in by Sebastian Huber <sebastian.huber@…>, on 06/30/16 at 12:08:18

rtems: Fix rtems_task_set_scheduler() API

Task priorities are only valid within a scheduler instance. The
rtems_task_set_scheduler() directive moves a task from one scheduler
instance to another using the current priority of the thread. However,
the current task priority of the source scheduler instance is undefined
in the target scheduler instance. Add a third parameter to specify the
priority.

Close #2749.

  • Property mode set to 100644
File size: 40.0 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
5 *
6 * This inline file contains all of the inlined routines associated with
7 * the manipulation of the scheduler.
8 */
9
10/*
11 *  Copyright (C) 2010 Gedare Bloom.
12 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
13 *  Copyright (c) 2014, 2016 embedded brains GmbH
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.org/license/LICENSE.
18 */
19
20#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
21#define _RTEMS_SCORE_SCHEDULERIMPL_H
22
23#include <rtems/score/scheduler.h>
24#include <rtems/score/cpusetimpl.h>
25#include <rtems/score/smpimpl.h>
26#include <rtems/score/status.h>
27#include <rtems/score/threadimpl.h>
28
29#ifdef __cplusplus
30extern "C" {
31#endif
32
33/**
34 * @addtogroup ScoreScheduler
35 */
36/**@{**/
37
38/**
39 *  @brief Initializes the scheduler to the policy chosen by the user.
40 *
41 *  This routine initializes the scheduler to the policy chosen by the user
42 *  through confdefs, or to the priority scheduler with ready chains by
43 *  default.
44 */
45void _Scheduler_Handler_initialization( void );
46
47RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
48  const Scheduler_Control *scheduler
49)
50{
51  return scheduler->context;
52}
53
/**
 * @brief Gets the scheduler instance of the thread.
 *
 * @param[in] the_thread The thread of interest.
 *
 * @return The scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  /* There is exactly one scheduler instance on uni-processor configurations */
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
66
/**
 * @brief Gets the own scheduler instance of the thread.
 *
 * On SMP configurations this may differ from _Scheduler_Get() in case the
 * thread currently uses a foreign scheduler node (helping protocol).
 *
 * @param[in] the_thread The thread of interest.
 *
 * @return The own scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  /* There is exactly one scheduler instance on uni-processor configurations */
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
79
/**
 * @brief Gets the scheduler instance assigned to the processor index.
 *
 * @param[in] cpu_index The processor index.
 *
 * @return The scheduler instance assigned to this processor, may be NULL on
 *   SMP configurations in case no scheduler instance is assigned.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  /* There is exactly one scheduler instance on uni-processor configurations */
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}
92
93RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
94  const Per_CPU_Control *cpu
95)
96{
97  uint32_t cpu_index = _Per_CPU_Get_index( cpu );
98
99  return _Scheduler_Get_by_CPU_index( cpu_index );
100}
101
/**
 * @brief Gets the own scheduler node of the thread.
 *
 * On SMP configurations this is the node owned by the thread itself, which
 * may differ from the node it currently uses (helping protocol).
 *
 * @param[in] the_thread The thread of interest.
 *
 * @return The own scheduler node of the thread.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_own_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_node;
#else
  return the_thread->Scheduler.node;
#endif
}
112
#if defined(RTEMS_SMP)
/**
 * @brief Gets the thread currently using the scheduler node.
 *
 * @param[in] node The scheduler node of interest.
 *
 * @return The user of the scheduler node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
  const Scheduler_Node *node
)
{
  return node->user;
}
#endif
121
122ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
123
/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * Currently one global lock protects all scheduler instances, so the
 * scheduler parameter is unused.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  (void) scheduler;
  _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
}
140
/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * Currently one global lock protects all scheduler instances, so the
 * scheduler parameter is unused.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  (void) scheduler;
  _ISR_lock_Release( &_Scheduler_Lock, lock_context );
}
157
158/**
159 * The preferred method to add a new scheduler is to define the jump table
160 * entries and add a case to the _Scheduler_Initialize routine.
161 *
162 * Generic scheduling implementations that rely on the ready queue only can
163 * be found in the _Scheduler_queue_XXX functions.
164 */
165
166/*
167 * Passing the Scheduler_Control* to these functions allows for multiple
168 * scheduler's to exist simultaneously, which could be useful on an SMP
169 * system.  Then remote Schedulers may be accessible.  How to protect such
170 * accesses remains an open problem.
171 */
172
/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread which state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* Delegate the scheduling decision to the scheduler implementation */
  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
193
#if defined(RTEMS_SMP)
/* Iteration state used by _Scheduler_Ask_for_help_visitor(). */
typedef struct {
  /* The thread currently in need of help. */
  Thread_Control *needs_help;
  /* The new thread in need of help produced by an ask for help operation. */
  Thread_Control *next_needs_help;
} Scheduler_Ask_for_help_context;

/*
 * Resource tree visitor: lets the own scheduler instance of each thread
 * offering help ask for help on behalf of the thread in need of help.
 * Returns true (which stops the iteration) once the operation yields a
 * different thread in need of help.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
  Resource_Node *resource_node,
  void          *arg
)
{
  bool done;
  Scheduler_Ask_for_help_context *help_context = arg;
  Thread_Control *previous_needs_help = help_context->needs_help;
  Thread_Control *next_needs_help;
  Thread_Control *offers_help =
    THREAD_RESOURCE_NODE_TO_THREAD( resource_node );
  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );

  next_needs_help = ( *scheduler->Operations.ask_for_help )(
    scheduler,
    offers_help,
    previous_needs_help
  );

  /* A changed thread in need of help terminates this iteration round */
  done = next_needs_help != previous_needs_help;

  if ( done ) {
    help_context->next_needs_help = next_needs_help;
  }

  return done;
}
227
/**
 * @brief Ask threads depending on resources owned by the thread for help.
 *
 * A thread is in need for help if it lost its assigned processor due to
 * pre-emption by a higher priority thread or it was not possible to assign it
 * a processor since its priority is too low on its current scheduler instance.
 *
 * The run-time of this function depends on the size of the resource tree of
 * the thread needing help and other resource trees in case threads in need for
 * help are produced during this operation.
 *
 * @param[in] needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help(
  Thread_Control *needs_help
)
{
  do {
    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );

    /* First let the own scheduler instance of the thread try to help */
    needs_help = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      needs_help,
      needs_help
    );

    if ( needs_help != NULL ) {
      /*
       * Walk the resource tree of the thread still in need of help; the
       * visitor may produce a new thread in need of help which restarts
       * the loop.
       */
      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };

      _Resource_Iterate(
        &needs_help->Resource_node,
        _Scheduler_Ask_for_help_visitor,
        &help_context
      );

      needs_help = help_context.next_needs_help;
    }
  } while ( needs_help != NULL );
}
267
/*
 * Asks for help only in case the thread owns resources and is not already
 * the active rival user of its own scheduler node; otherwise asking for
 * help cannot yield anything.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  Thread_Control *needs_help
)
{
  if (
    needs_help != NULL
      && _Resource_Node_owns_resources( &needs_help->Resource_node )
  ) {
    Scheduler_Node *node = _Scheduler_Thread_get_own_node( needs_help );

    if (
      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
        || _Scheduler_Node_get_user( node ) != needs_help
    ) {
      _Scheduler_Ask_for_help( needs_help );
    }
  }
}
#endif
287
/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* On SMP the yield operation returns a potential thread in need of help */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.yield )( scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
318
/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.block )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
341
/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* On SMP the unblock operation returns a potential thread in need of help */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.unblock )( scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
374
/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
  const Scheduler_Control *own_scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  /* The priority is defined with respect to the own scheduler instance */
  own_scheduler = _Scheduler_Get_own( the_thread );
  _Scheduler_Acquire_critical( own_scheduler, &lock_context );

#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *own_scheduler->Operations.update_priority )( own_scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( own_scheduler, &lock_context );
}
411
412/**
413 * @brief Maps a thread priority from the user domain to the scheduler domain.
414 *
415 * Let M be the maximum scheduler priority.  The mapping must be bijective in
416 * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
417 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
418 * other values the mapping is undefined.
419 *
420 * @param[in] scheduler The scheduler instance.
421 * @param[in] priority The user domain thread priority.
422 *
423 * @return The corresponding thread priority of the scheduler domain is returned.
424 */
425RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
426  const Scheduler_Control *scheduler,
427  Priority_Control         priority
428)
429{
430  return ( *scheduler->Operations.map_priority )( scheduler, priority );
431}
432
433/**
434 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
435 *
436 * @param[in] scheduler The scheduler instance.
437 * @param[in] priority The scheduler domain thread priority.
438 *
439 * @return The corresponding thread priority of the user domain is returned.
440 */
441RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
442  const Scheduler_Control *scheduler,
443  Priority_Control         priority
444)
445{
446  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
447}
448
449/**
450 * @brief Initializes a scheduler node.
451 *
452 * The scheduler node contains arbitrary data on function entry.  The caller
453 * must ensure that _Scheduler_Node_destroy() will be called after a
454 * _Scheduler_Node_initialize() before the memory of the scheduler node is
455 * destroyed.
456 *
457 * @param[in] scheduler The scheduler instance.
458 * @param[in] the_thread The thread containing the scheduler node.
459 * @param[in] priority The thread priority.
460 */
461RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
462  const Scheduler_Control *scheduler,
463  Thread_Control          *the_thread,
464  Priority_Control         priority
465)
466{
467  ( *scheduler->Operations.node_initialize )(
468    scheduler,
469    the_thread,
470    priority
471  );
472}
473
474/**
475 * @brief Destroys a scheduler node.
476 *
477 * The caller must ensure that _Scheduler_Node_destroy() will be called only
478 * after a corresponding _Scheduler_Node_initialize().
479 *
480 * @param[in] scheduler The scheduler instance.
481 * @param[in] the_thread The thread containing the scheduler node.
482 */
483RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
484  const Scheduler_Control *scheduler,
485  Thread_Control          *the_thread
486)
487{
488  ( *scheduler->Operations.node_destroy )( scheduler, the_thread );
489}
490
491/**
492 * @brief Releases a job of a thread with respect to the scheduler.
493 *
494 * @param[in] the_thread The thread.
495 * @param[in] deadline The deadline in watchdog ticks since boot.
496 */
497RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
498  Thread_Control *the_thread,
499  uint64_t        deadline
500)
501{
502  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
503
504  ( *scheduler->Operations.release_job )( scheduler, the_thread, deadline );
505}
506
507/**
508 * @brief Scheduler method invoked at each clock tick.
509 *
510 * This method is invoked at each clock tick to allow the scheduler
511 * implementation to perform any activities required.  For the
512 * scheduler which support standard RTEMS features, this includes
513 * time-slicing management.
514 */
515RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
516{
517  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
518  Thread_Control *executing = cpu->executing;
519
520  if ( scheduler != NULL && executing != NULL ) {
521    ( *scheduler->Operations.tick )( scheduler, executing );
522  }
523}
524
525/**
526 * @brief Starts the idle thread for a particular processor.
527 *
528 * @param[in] scheduler The scheduler instance.
529 * @param[in,out] the_thread The idle thread for the processor.
530 * @param[in,out] cpu The processor for the idle thread.
531 *
532 * @see _Thread_Create_idle().
533 */
534RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
535  const Scheduler_Control *scheduler,
536  Thread_Control          *the_thread,
537  Per_CPU_Control         *cpu
538)
539{
540  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
541}
542
#if defined(RTEMS_SMP)
/* Gets the scheduler assignment of the processor index. */
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

/* Returns true if this processor assignment is mandatory, false otherwise. */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

/* A processor is started only if a scheduler instance is assigned to it. */
RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */
565
/**
 * @brief Returns true if the scheduler instance owns the processor, false
 * otherwise.
 *
 * On uni-processor configurations the sole scheduler instance owns every
 * processor, so this is always true.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpu_index The processor index.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}
583
#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)

/**
 * @brief Sets the processor affinity mask of the scheduler instance.
 *
 * Fills @a cpuset with the online processors owned by the scheduler
 * instance; on uni-processor configurations all processors are set.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpusetsize Size of @a cpuset in bytes.
 * @param[out] cpuset The processor set, cleared first.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
609
610RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
611  const Scheduler_Control *scheduler,
612  Thread_Control          *the_thread,
613  size_t                   cpusetsize,
614  cpu_set_t               *cpuset
615)
616{
617  (void) the_thread;
618
619  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );
620
621  return true;
622}
623
624bool _Scheduler_Get_affinity(
625  Thread_Control *the_thread,
626  size_t          cpusetsize,
627  cpu_set_t      *cpuset
628);
629
630RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
631  const Scheduler_Control *scheduler,
632  Thread_Control          *the_thread,
633  size_t                   cpusetsize,
634  const cpu_set_t         *cpuset
635)
636{
637  uint32_t cpu_count = _SMP_Get_processor_count();
638  uint32_t cpu_index;
639  bool     ok = true;
640
641  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
642#if defined(RTEMS_SMP)
643    const Scheduler_Control *scheduler_of_cpu =
644      _Scheduler_Get_by_CPU_index( cpu_index );
645
646    ok = ok
647      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
648        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
649          && scheduler != scheduler_of_cpu ) );
650#else
651    (void) scheduler;
652
653    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
654#endif
655  }
656
657  return ok;
658}
659
660bool _Scheduler_Set_affinity(
661  Thread_Control  *the_thread,
662  size_t           cpusetsize,
663  const cpu_set_t *cpuset
664);
665
666#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
667
/**
 * @brief Generic block operation: extract the thread from the ready set and
 * schedule anew in case the thread is the executing or heir thread.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread to block.
 * @param[in] extract Scheduler-specific extract operation.
 * @param[in] schedule Scheduler-specific schedule operation; the third
 *   argument forces a scheduling decision.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control * ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool )
)
{
  ( *extract )( scheduler, the_thread );

  /* TODO: flash critical section? */

  /* A new heir is only necessary if the blocked thread was executing or heir */
  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
688
/**
 * @brief Gets the count of processors owned by the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 *
 * @return The processor count of the scheduler instance; one on
 *   uni-processor configurations.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}
701
702RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
703{
704  return _Objects_Build_id(
705    OBJECTS_FAKE_OBJECTS_API,
706    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
707    _Objects_Local_node,
708    (uint16_t) ( scheduler_index + 1 )
709  );
710}
711
712RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
713{
714  uint32_t minimum_id = _Scheduler_Build_id( 0 );
715
716  return id - minimum_id;
717}
718
719RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
720  Objects_Id                id,
721  const Scheduler_Control **scheduler_p
722)
723{
724  uint32_t index = _Scheduler_Get_index_by_id( id );
725  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];
726
727  *scheduler_p = scheduler;
728
729  return index < _Scheduler_Count
730    && _Scheduler_Get_processor_count( scheduler ) > 0;
731}
732
733RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
734{
735  const Scheduler_Control *scheduler;
736  bool ok = _Scheduler_Get_by_id( id, &scheduler );
737
738  (void) scheduler;
739
740  return ok;
741}
742
743RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
744  const Scheduler_Control *scheduler
745)
746{
747  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
748}
749
750RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
751  const Thread_Control *the_thread
752)
753{
754  return the_thread->Scheduler.node;
755}
756
/**
 * @brief Initializes the scheduler-independent part of a scheduler node.
 *
 * @param[out] node The scheduler node to initialize.
 * @param[in] the_thread The thread owning and initially using the node.
 * @param[in] priority The initial thread priority of the node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
  Scheduler_Node   *node,
  Thread_Control   *the_thread,
  Priority_Control  priority
)
{
  node->Priority.value = priority;
  node->Priority.prepend_it = false;

#if defined(RTEMS_SMP)
  /* Initially the thread helps itself and no idle thread is in use */
  node->user = the_thread;
  node->help_state = SCHEDULER_HELP_YOURSELF;
  node->owner = the_thread;
  node->idle = NULL;
  node->accepts_help = the_thread;
  _SMP_sequence_lock_Initialize( &node->Priority.Lock );
#else
  (void) the_thread;
#endif
}
777
/**
 * @brief Gets the priority and prepend indicator of the scheduler node.
 *
 * On SMP configurations a sequence lock retry loop yields a consistent
 * snapshot of both fields.
 *
 * @param[in] node The scheduler node.
 * @param[out] prepend_it_p Receives the prepend indicator.
 *
 * @return The priority of the scheduler node.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Node_get_priority(
  Scheduler_Node *node,
  bool           *prepend_it_p
)
{
  Priority_Control priority;
  bool             prepend_it;

#if defined(RTEMS_SMP)
  unsigned int     seq;

  do {
    seq = _SMP_sequence_lock_Read_begin( &node->Priority.Lock );
#endif

    priority = node->Priority.value;
    prepend_it = node->Priority.prepend_it;

#if defined(RTEMS_SMP)
  } while ( _SMP_sequence_lock_Read_retry( &node->Priority.Lock, seq ) );
#endif

  *prepend_it_p = prepend_it;

  return priority;
}
804
/**
 * @brief Sets the priority and prepend indicator of the scheduler node.
 *
 * On SMP configurations the write is protected by the node's sequence lock
 * so readers obtain a consistent pair of values.
 *
 * @param[in] node The scheduler node.
 * @param[in] new_priority The new priority value.
 * @param[in] prepend_it The new prepend indicator.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_priority(
  Scheduler_Node   *node,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
#if defined(RTEMS_SMP)
  unsigned int seq;

  seq = _SMP_sequence_lock_Write_begin( &node->Priority.Lock );
#endif

  node->Priority.value = new_priority;
  node->Priority.prepend_it = prepend_it;

#if defined(RTEMS_SMP)
  _SMP_sequence_lock_Write_end( &node->Priority.Lock, seq );
#endif
}
824
#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);
848
849RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
850  const Scheduler_Node *node
851)
852{
853  return node->owner;
854}
855
856RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
857  const Scheduler_Node *node
858)
859{
860  return node->idle;
861}
862
863RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user(
864  Scheduler_Node *node,
865  Thread_Control *user
866)
867{
868  node->user = user;
869}
870
871RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
872  Thread_Control *the_thread,
873  Scheduler_Node *node
874)
875{
876  the_thread->Scheduler.node = node;
877}
878
/**
 * @brief Lets the thread use the scheduler node and the corresponding
 * scheduler instance of the node's previous user.
 *
 * @param[in,out] the_thread The thread taking over the node.
 * @param[in] node The scheduler node to use.
 * @param[in] previous_user_of_node The thread which used this node before;
 *   its own scheduler instance becomes the scheduler instance of
 *   @a the_thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}
891
892extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];
893
/**
 * @brief Changes the scheduler state of the thread.
 *
 * In debug configurations the transition is checked against the table of
 * valid state changes.
 *
 * @param[in,out] the_thread The thread.
 * @param[in] new_state The new scheduler state of the thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );

  the_thread->Scheduler.state = new_state;
}
906
907/**
908 * @brief Changes the scheduler help state of a thread.
909 *
910 * @param[in] the_thread The thread.
911 * @param[in] new_help_state The new help state.
912 *
913 * @return The previous help state.
914 */
915RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
916  Thread_Control       *the_thread,
917  Scheduler_Help_state  new_help_state
918)
919{
920  Scheduler_Node *node = _Scheduler_Thread_get_own_node( the_thread );
921  Scheduler_Help_state previous_help_state = node->help_state;
922
923  node->help_state = new_help_state;
924
925  return previous_help_state;
926}
927
928/**
929 * @brief Changes the resource tree root of a thread.
930 *
931 * For each node of the resource sub-tree specified by the top thread the
932 * scheduler asks for help.  So the root thread gains access to all scheduler
933 * nodes corresponding to the resource sub-tree.  In case a thread previously
934 * granted help is displaced by this operation, then the scheduler asks for
935 * help using its remaining resource tree.
936 *
937 * The run-time of this function depends on the size of the resource sub-tree
938 * and other resource trees in case threads in need for help are produced
939 * during this operation.
940 *
941 * @param[in] top The thread specifying the resource sub-tree top.
942 * @param[in] root The thread specifying the new resource sub-tree root.
943 */
944void _Scheduler_Thread_change_resource_root(
945  Thread_Control *top,
946  Thread_Control *root
947);
948
/**
 * @brief Sets the idle thread as the user of the scheduler node.
 *
 * The node must be in an active helping state, have no idle thread yet, and
 * currently be used by its owner (checked via assertions).
 *
 * @param[in,out] node The scheduler node.
 * @param[in] idle The idle thread to use for this node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}
968
969/**
970 * @brief Use an idle thread for this scheduler node.
971 *
972 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
973 * helping state may use an idle thread for the scheduler node owned by itself
974 * in case it executes currently using another scheduler node or in case it is
975 * in a blocking state.
976 *
977 * @param[in] context The scheduler instance context.
978 * @param[in] node The node which wants to use the idle thread.
979 * @param[in] get_idle_thread Function to get an idle thread.
980 */
981RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
982  Scheduler_Context         *context,
983  Scheduler_Node            *node,
984  Scheduler_Get_idle_thread  get_idle_thread
985)
986{
987  Thread_Control *idle = ( *get_idle_thread )( context );
988
989  _Scheduler_Set_idle_thread( node, idle );
990
991  return idle;
992}
993
/* Possible outcomes of _Scheduler_Try_to_schedule_node(). */
typedef enum {
  /* The node can be scheduled. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  /* The node needs the idle thread of the victim node (exchange it). */
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  /* The node cannot be scheduled and should be blocked. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;
999
/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The action the caller has to carry out, see
 *   Scheduler_Try_to_schedule_action.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Scheduler_Try_to_schedule_action action;
  Thread_Control *owner;
  Thread_Control *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;

  /* A node outside the helping protocol can always be scheduled */
  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );
  user = _Scheduler_Node_get_user( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      /* The ready user takes over the node and its scheduler instance */
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      /* A blocked owner needs an idle thread to keep the node scheduled */
      if ( idle != NULL ) {
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    /* Passive nodes are scheduled only with a ready user, never with idle */
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  return action;
}
1064
/**
 * @brief Releases an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    /* Detach the idle thread and hand the node back to its owner. */
    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    /* The idle thread continues with its own scheduler node. */
    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

    ( *release_idle_thread )( context, idle );
  }

  return idle;
}
1096
1097RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
1098  Scheduler_Node *needs_idle,
1099  Scheduler_Node *uses_idle,
1100  Thread_Control *idle
1101)
1102{
1103  uses_idle->idle = NULL;
1104  _Scheduler_Node_set_user(
1105    uses_idle,
1106    _Scheduler_Node_get_owner( uses_idle )
1107  );
1108  _Scheduler_Set_idle_thread( needs_idle, idle );
1109}
1110
/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval true Continue with the blocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *old_user;
  Thread_Control *new_user;

  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );

  /* Without helping-protocol involvement the node simply blocks. */
  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    _Assert( thread == _Scheduler_Node_get_user( node ) );

    return true;
  }

  new_user = NULL;

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( is_scheduled ) {
      /* Keep the node scheduled by substituting an idle thread. */
      _Assert( thread == _Scheduler_Node_get_user( node ) );
      old_user = thread;
      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( is_scheduled ) {
      old_user = _Scheduler_Node_get_user( node );

      if ( thread == old_user ) {
        Thread_Control *owner = _Scheduler_Node_get_owner( node );

        if (
          thread != owner
            && owner->Scheduler.state == THREAD_SCHEDULER_READY
        ) {
          /* Hand the node over to its ready owner. */
          new_user = owner;
          _Scheduler_Node_set_user( node, new_user );
        } else {
          /* No ready owner available: fall back to an idle thread. */
          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
        }
      }
    }
  } else {
    /* Not implemented, this is part of the OMIP support path. */
    _Assert(0);
  }

  if ( new_user != NULL ) {
    /* old_user was assigned on every path which produced a new user. */
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
  }

  /* The node itself stays in place; the caller must not block it. */
  return false;
}
1185
/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  if ( is_scheduled ) {
    Thread_Control *old_user = _Scheduler_Node_get_user( node );
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );
    Thread_Control *owner = _Scheduler_Node_get_owner( node );
    Thread_Control *new_user;

    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
      /* An active owner node must have been kept scheduled by an idle
       * thread; the unblocked thread takes over from it. */
      _Assert( idle != NULL );
      new_user = the_thread;
    } else if ( idle != NULL ) {
      /* The unblocked rival takes over from the released idle thread. */
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      new_user = the_thread;
    } else if ( the_thread != owner ) {
      /* A rival becomes the new user; the owner is demoted to ready. */
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
      new_user = the_thread;
      _Scheduler_Node_set_user( node, new_user );
    } else {
      /* The owner unblocks while another user executes: it only becomes
       * ready, the current user keeps the CPU. */
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
      new_user = NULL;
    }

    if ( new_user != NULL ) {
      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Set_CPU( new_user, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
    }

    /* The node is already scheduled: no further unblock processing. */
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );

    unblock = true;
  }

  return unblock;
}
1253
/**
 * @brief Asks a ready scheduler node for help.
 *
 * @param[in] node The ready node offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
  Scheduler_Node *node,
  Thread_Control *needs_help
)
{
  /* Let the thread in need of help become the user of this ready node. */
  _Scheduler_Node_set_user( node, needs_help );

  return needs_help;
}
1271
/**
 * @brief Asks a scheduled scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 * @param[in] previous_accepts_help The previous thread accepting help by this
 *   scheduler node.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval needs_help The previous thread accepting help by this scheduler node
 *   which was displaced by the thread needing help.
 * @retval NULL There are no more threads needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Thread_Control                *offers_help,
  Thread_Control                *needs_help,
  Thread_Control                *previous_accepts_help,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *next_needs_help = NULL;
  Thread_Control *old_user = NULL;
  Thread_Control *new_user = NULL;

  if (
    previous_accepts_help != needs_help
      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
  ) {
    /* This node currently helps a different thread: displace it. */
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
      old_user = previous_accepts_help;
    }

    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
      new_user = needs_help;
    } else {
      /* The thread needing help cannot use the node: the helper executes. */
      _Assert(
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );

      new_user = offers_help;
    }

    if ( previous_accepts_help != offers_help ) {
      /* The displaced thread may still need help elsewhere. */
      next_needs_help = previous_accepts_help;
    }
  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      old_user = _Scheduler_Node_get_user( node );
    }

    new_user = needs_help;
  } else {
    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
  }

  if ( new_user != old_user ) {
    /* Switch the executing thread of this node and update the heir on the
     * CPU the old user occupied. */
    Per_CPU_Control *cpu_self = _Per_CPU_Get();
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_scheduler_and_node(
      old_user,
      _Scheduler_Thread_get_own_node( old_user ),
      old_user
    );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );

    _Scheduler_Node_set_user( node, new_user );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
  }

  return next_needs_help;
}
1371
1372/**
1373 * @brief Asks a blocked scheduler node for help.
1374 *
1375 * @param[in] context The scheduler instance context.
1376 * @param[in] node The scheduled node offering help.
1377 * @param[in] offers_help The thread offering help.
1378 * @param[in] needs_help The thread needing help.
1379 *
1380 * @retval true Enqueue this scheduler node.
1381 * @retval false Otherwise.
1382 */
1383RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
1384  Scheduler_Context *context,
1385  Scheduler_Node    *node,
1386  Thread_Control    *offers_help,
1387  Thread_Control    *needs_help
1388)
1389{
1390  bool enqueue;
1391
1392  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
1393
1394  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1395    _Scheduler_Node_set_user( node, needs_help );
1396    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );
1397
1398    enqueue = true;
1399  } else {
1400    enqueue = false;
1401  }
1402
1403  return enqueue;
1404}
1405#endif
1406
1407RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
1408  Thread_Control *new_heir,
1409  bool            force_dispatch
1410)
1411{
1412  Thread_Control *heir = _Thread_Heir;
1413
1414  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
1415#if defined(RTEMS_SMP)
1416    /*
1417     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
1418     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
1419     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
1420     * schedulers.
1421     */
1422    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
1423    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1424#endif
1425    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
1426    _Thread_Heir = new_heir;
1427    _Thread_Dispatch_necessary = true;
1428  }
1429}
1430
/**
 * @brief Sets a new scheduler instance and priority for the thread.
 *
 * The thread's current, real and initial priorities are all replaced by the
 * specified priority, which must be valid in the new scheduler instance
 * (priorities are not portable across scheduler instances).
 *
 * @param[in] new_scheduler The scheduler instance to use for the thread.
 * @param[in] the_thread The thread in question.
 * @param[in] priority The thread priority valid in the new scheduler
 *   instance.
 *
 * @retval STATUS_SUCCESSFUL The scheduler and priority were set.
 * @retval STATUS_RESOURCE_IN_USE The thread owns resources or waits on a
 *   thread queue.
 */
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *own_node;

  /*
   * A scheduler change is refused for threads holding resources or enqueued
   * on a thread queue, since their priority relationships are tied to the
   * current scheduler instance.
   */
  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  the_thread->current_priority = priority;
  the_thread->real_priority = priority;
  the_thread->Start.initial_priority = priority;

  own_node = _Scheduler_Thread_get_own_node( the_thread );

#if defined(RTEMS_SMP)
  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      /* Take a ready thread out of its old scheduler instance first. */
      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      /* Re-create the scheduler node in the new instance. */
      _Scheduler_Node_destroy( old_scheduler, the_thread );
      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      _Scheduler_Node_initialize( new_scheduler, the_thread, priority );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  /* Same scheduler instance: a plain priority update suffices. */
  _Scheduler_Node_set_priority( own_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
1485
1486/** @} */
1487
1488#ifdef __cplusplus
1489}
1490#endif
1491
1492#endif
1493/* end of include file */
Note: See TracBrowser for help on using the repository browser.