source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 1fcac5ad

Last change on this file was 1fcac5ad, checked in by Sebastian Huber <sebastian.huber@…> on 07/25/16 at 14:35:37

score: Turn thread lock into thread wait lock

The _Thread_Lock_acquire() function had a potentially infinite run-time
due to the lack of fairness at atomic operations level.

Update #2412.
Update #2556.
Update #2765.

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2016 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
  uint32_t cpu_index = _Per_CPU_Get_index( cpu );

  return _Scheduler_Get_by_CPU_index( cpu_index );
}

RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_own_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_node;
#else
  return the_thread->Scheduler.node;
#endif
}

#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
  const Scheduler_Node *node
)
{
  return node->user;
}
#endif

ISR_LOCK_DECLARE( extern, _Scheduler_Lock )

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  (void) scheduler;
  _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  (void) scheduler;
  _ISR_lock_Release( &_Scheduler_Lock, lock_context );
}
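
/*
 * A minimal usage sketch of the acquire/release pair above (an illustrative
 * addition, not part of the original header).  It assumes interrupts are
 * already disabled, as required, and do_something_with() is a hypothetical
 * operation on the scheduler instance:
 *
 *   const Scheduler_Control *scheduler;
 *   ISR_lock_Context         lock_context;
 *
 *   scheduler = _Scheduler_Get( the_thread );
 *   _Scheduler_Acquire_critical( scheduler, &lock_context );
 *   do_something_with( scheduler, the_thread );
 *   _Scheduler_Release_critical( scheduler, &lock_context );
 */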

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely only on the ready queue can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
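
/*
 * An illustrative sketch of the jump table mentioned above (an addition for
 * clarity, not from the original header).  Only the entry points this file
 * dispatches through are shown; the my_* functions and the remaining members
 * are hypothetical placeholders:
 *
 *   const Scheduler_Control my_scheduler = {
 *     ...,
 *     .Operations = {
 *       .schedule = my_schedule,
 *       .yield = my_yield,
 *       .block = my_block,
 *       .unblock = my_unblock,
 *       .update_priority = my_update_priority,
 *       ...
 *     },
 *     ...
 *   };
 */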

#if defined(RTEMS_SMP)
typedef struct {
  Thread_Control *needs_help;
  Thread_Control *next_needs_help;
} Scheduler_Ask_for_help_context;

RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
  Resource_Node *resource_node,
  void          *arg
)
{
  bool done;
  Scheduler_Ask_for_help_context *help_context = arg;
  Thread_Control *previous_needs_help = help_context->needs_help;
  Thread_Control *next_needs_help;
  Thread_Control *offers_help =
    THREAD_RESOURCE_NODE_TO_THREAD( resource_node );
  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );

  next_needs_help = ( *scheduler->Operations.ask_for_help )(
    scheduler,
    offers_help,
    previous_needs_help
  );

  done = next_needs_help != previous_needs_help;

  if ( done ) {
    help_context->next_needs_help = next_needs_help;
  }

  return done;
}

/**
 * @brief Asks threads that depend on resources owned by the thread for help.
 *
 * A thread is in need of help if it lost its assigned processor due to
 * pre-emption by a higher priority thread, or if it was not possible to
 * assign it a processor since its priority is too low on its current
 * scheduler instance.
 *
 * The run-time of this function depends on the size of the resource tree of
 * the thread needing help and other resource trees in case threads in need
 * of help are produced during this operation.
 *
 * @param[in] needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help(
  Thread_Control *needs_help
)
{
  do {
    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );

    needs_help = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      needs_help,
      needs_help
    );

    if ( needs_help != NULL ) {
      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };

      _Resource_Iterate(
        &needs_help->Resource_node,
        _Scheduler_Ask_for_help_visitor,
        &help_context
      );

      needs_help = help_context.next_needs_help;
    }
  } while ( needs_help != NULL );
}

RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  Thread_Control *needs_help
)
{
  if (
    needs_help != NULL
      && _Resource_Node_owns_resources( &needs_help->Resource_node )
  ) {
    Scheduler_Node *node = _Scheduler_Thread_get_own_node( needs_help );

    if (
      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
        || _Scheduler_Node_get_user( node ) != needs_help
    ) {
      _Scheduler_Ask_for_help( needs_help );
    }
  }
}
#endif

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.yield )( scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.block )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.unblock )( scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
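
/*
 * An illustrative block/unblock sequence (an addition for clarity, not from
 * the original header).  A blocking synchronization primitive typically
 * removes the thread from the scheduling decision and later re-inserts it;
 * the state-setting helpers here are hypothetical placeholders:
 *
 *   set_thread_blocked_state( the_thread );
 *   _Scheduler_Block( the_thread );
 *   ...
 *   clear_thread_blocked_state( the_thread );
 *   _Scheduler_Unblock( the_thread );
 */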

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
  const Scheduler_Control *own_scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  own_scheduler = _Scheduler_Get_own( the_thread );
  _Scheduler_Acquire_critical( own_scheduler, &lock_context );

#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *own_scheduler->Operations.update_priority )( own_scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( own_scheduler, &lock_context );
}

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
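
/*
 * A sketch of the bijectivity property stated above (an illustrative
 * addition, not from the original header).  The priority value 10 is an
 * arbitrary example from the user domain:
 *
 *   const Scheduler_Control *scheduler = &_Scheduler_Table[ 0 ];
 *   Priority_Control         user_priority = 10;
 *   Priority_Control         scheduler_priority;
 *
 *   scheduler_priority = _Scheduler_Map_priority( scheduler, user_priority );
 *   _Assert(
 *     _Scheduler_Unmap_priority( scheduler, scheduler_priority )
 *       == user_priority
 *   );
 */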

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
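
/*
 * An illustrative node life cycle (an addition for clarity, not from the
 * original header), mirroring the pattern used by _Scheduler_Set() below
 * when a thread migrates to another scheduler instance:
 *
 *   _Scheduler_Node_destroy( old_scheduler, node );
 *   _Scheduler_Node_initialize( new_scheduler, node, the_thread, priority );
 */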

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control *the_thread,
  uint64_t        deadline
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  ( *scheduler->Operations.release_job )( scheduler, the_thread, deadline );
}

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers
 * which support standard RTEMS features, this includes
 * time-slicing management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}
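
/*
 * An illustrative call site (an addition for clarity, not from the original
 * header): the clock tick service would invoke this once per processor, so
 * that each processor's scheduler can account for the elapsed tick.  The
 * loop below is a hypothetical sketch:
 *
 *   uint32_t cpu_index;
 *
 *   for ( cpu_index = 0 ; cpu_index < _SMP_Get_processor_count() ; ++cpu_index ) {
 *     _Scheduler_Tick( _Per_CPU_Get_by_index( cpu_index ) );
 *   }
 */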

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)

RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
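
/*
 * A minimal usage sketch (an illustrative addition, not from the original
 * header) querying the processor set owned by the first scheduler instance:
 *
 *   cpu_set_t cpuset;
 *
 *   _Scheduler_Get_processor_set(
 *     &_Scheduler_Table[ 0 ],
 *     sizeof( cpuset ),
 *     &cpuset
 *   );
 *
 * CPU_ISSET_S( 0, sizeof( cpuset ), &cpuset ) is then true if processor 0
 * is owned by this scheduler instance.
 */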

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Scheduler_Control *scheduler_of_cpu =
      _Scheduler_Get_by_CPU_index( cpu_index );

    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
          && scheduler != scheduler_of_cpu ) );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control * ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool )
)
{
  ( *extract )( scheduler, the_thread );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
{
  const Scheduler_Control *scheduler;
  bool ok = _Scheduler_Get_by_id( id, &scheduler );

  (void) scheduler;

  return ok;
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
  return the_thread->Scheduler.node;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
  Scheduler_Node   *node,
  Thread_Control   *the_thread,
  Priority_Control  priority
)
{
  node->Priority.value = priority;
  node->Priority.prepend_it = false;

#if defined(RTEMS_SMP)
  node->user = the_thread;
  node->help_state = SCHEDULER_HELP_YOURSELF;
  node->owner = the_thread;
  node->idle = NULL;
  node->accepts_help = the_thread;
  _SMP_sequence_lock_Initialize( &node->Priority.Lock );
#else
  (void) the_thread;
#endif
}

RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Node_get_priority(
  Scheduler_Node *node,
  bool           *prepend_it_p
)
{
  Priority_Control priority;
  bool             prepend_it;

#if defined(RTEMS_SMP)
  unsigned int     seq;

  do {
    seq = _SMP_sequence_lock_Read_begin( &node->Priority.Lock );
#endif

    priority = node->Priority.value;
    prepend_it = node->Priority.prepend_it;

#if defined(RTEMS_SMP)
  } while ( _SMP_sequence_lock_Read_retry( &node->Priority.Lock, seq ) );
#endif

  *prepend_it_p = prepend_it;

  return priority;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_priority(
  Scheduler_Node   *node,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
#if defined(RTEMS_SMP)
  unsigned int seq;

  seq = _SMP_sequence_lock_Write_begin( &node->Priority.Lock );
#endif

  node->Priority.value = new_priority;
  node->Priority.prepend_it = prepend_it;

#if defined(RTEMS_SMP)
  _SMP_sequence_lock_Write_end( &node->Priority.Lock, seq );
#endif
}
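
/*
 * The two functions above form a sequence lock (seqlock) pair on SMP
 * configurations: the writer bumps the sequence counter around its update,
 * and the reader retries until it observes a stable sequence.  A minimal
 * reader sketch (an illustrative addition, not from the original header):
 *
 *   bool             prepend_it;
 *   Priority_Control priority;
 *
 *   priority = _Scheduler_Node_get_priority( node, &prepend_it );
 *
 * The pair (priority, prepend_it) is then a consistent snapshot, even if
 * another processor concurrently calls _Scheduler_Node_set_priority().
 */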

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *own_node;

  own_node = _Scheduler_Thread_get_own_node( the_thread );
  _Scheduler_Node_set_priority( own_node, new_priority, prepend_it );

  the_thread->current_priority = new_priority;
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
  const Scheduler_Node *node
)
{
  return node->owner;
}

RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
  const Scheduler_Node *node
)
{
  return node->idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user(
  Scheduler_Node *node,
  Thread_Control *user
)
{
  node->user = user;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  the_thread->Scheduler.node = node;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}

extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );

  the_thread->Scheduler.state = new_state;
}

/**
 * @brief Changes the scheduler help state of a thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_help_state The new help state.
 *
 * @return The previous help state.
 */
RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
  Thread_Control       *the_thread,
  Scheduler_Help_state  new_help_state
)
{
  Scheduler_Node *node = _Scheduler_Thread_get_own_node( the_thread );
  Scheduler_Help_state previous_help_state = node->help_state;

  node->help_state = new_help_state;

  return previous_help_state;
}

/**
 * @brief Changes the resource tree root of a thread.
 *
 * For each node of the resource sub-tree specified by the top thread the
 * scheduler asks for help.  So the root thread gains access to all scheduler
 * nodes corresponding to the resource sub-tree.  In case a thread previously
 * granted help is displaced by this operation, then the scheduler asks for
 * help using its remaining resource tree.
 *
 * The run-time of this function depends on the size of the resource sub-tree
 * and other resource trees in case threads in need of help are produced
 * during this operation.
 *
 * @param[in] top The thread specifying the resource sub-tree top.
 * @param[in] root The thread specifying the new resource sub-tree root.
 */
void _Scheduler_Thread_change_resource_root(
  Thread_Control *top,
  Thread_Control *root
);

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Uses an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
 * helping state may use an idle thread for the scheduler node owned by itself
 * in case it currently executes using another scheduler node or in case it is
 * in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );

  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Tries to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The action indicating how to proceed with this node: schedule it,
 *   exchange idle threads, or block it.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Scheduler_Try_to_schedule_action action;
  Thread_Control *owner;
  Thread_Control *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );
  user = _Scheduler_Node_get_user( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      if ( idle != NULL ) {
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  return action;
}

/**
 * @brief Releases an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Blocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval true Continue with the blocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *old_user;
  Thread_Control *new_user;

  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    _Assert( thread == _Scheduler_Node_get_user( node ) );

    return true;
  }

  new_user = NULL;

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( is_scheduled ) {
      _Assert( thread == _Scheduler_Node_get_user( node ) );
      old_user = thread;
      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( is_scheduled ) {
      old_user = _Scheduler_Node_get_user( node );

      if ( thread == old_user ) {
        Thread_Control *owner = _Scheduler_Node_get_owner( node );

        if (
          thread != owner
            && owner->Scheduler.state == THREAD_SCHEDULER_READY
        ) {
          new_user = owner;
          _Scheduler_Node_set_user( node, new_user );
        } else {
          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
        }
      }
    }
  } else {
    /* Not implemented, this is part of the OMIP support path. */
    _Assert( 0 );
  }

  if ( new_user != NULL ) {
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
  }

  return false;
}

/**
 * @brief Unblocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  if ( is_scheduled ) {
    Thread_Control *old_user = _Scheduler_Node_get_user( node );
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );
    Thread_Control *owner = _Scheduler_Node_get_owner( node );
    Thread_Control *new_user;

    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
      _Assert( idle != NULL );
      new_user = the_thread;
    } else if ( idle != NULL ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      new_user = the_thread;
    } else if ( the_thread != owner ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
      new_user = the_thread;
      _Scheduler_Node_set_user( node, new_user );
    } else {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
      new_user = NULL;
    }

    if ( new_user != NULL ) {
      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Set_CPU( new_user, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
    }

    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );

    unblock = true;
  }

  return unblock;
}

/**
 * @brief Asks a ready scheduler node for help.
 *
 * @param[in] node The ready node offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
  Scheduler_Node *node,
  Thread_Control *needs_help
)
{
  _Scheduler_Node_set_user( node, needs_help );

  return needs_help;
}

/**
 * @brief Asks a scheduled scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 * @param[in] previous_accepts_help The previous thread accepting help by this
 *   scheduler node.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval needs_help The previous thread accepting help by this scheduler node
 *   which was displaced by the thread needing help.
 * @retval NULL There are no more threads needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Thread_Control                *offers_help,
  Thread_Control                *needs_help,
  Thread_Control                *previous_accepts_help,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *next_needs_help = NULL;
  Thread_Control *old_user = NULL;
  Thread_Control *new_user = NULL;

  if (
    previous_accepts_help != needs_help
      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
  ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
      old_user = previous_accepts_help;
    }

    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
      new_user = needs_help;
    } else {
      _Assert(
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );

      new_user = offers_help;
    }

    if ( previous_accepts_help != offers_help ) {
      next_needs_help = previous_accepts_help;
    }
  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      old_user = _Scheduler_Node_get_user( node );
    }

    new_user = needs_help;
  } else {
    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
  }

  if ( new_user != old_user ) {
    Per_CPU_Control *cpu_self = _Per_CPU_Get();
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_scheduler_and_node(
      old_user,
      _Scheduler_Thread_get_own_node( old_user ),
      old_user
    );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );

    _Scheduler_Node_set_user( node, new_user );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
  }

  return next_needs_help;
}

/**
 * @brief Asks a blocked scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval true Enqueue this scheduler node.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *offers_help,
  Thread_Control    *needs_help
)
{
  bool enqueue;

  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Scheduler_Node_set_user( node, needs_help );
    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );

    enqueue = true;
  } else {
    enqueue = false;
  }

  return enqueue;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *own_node;

  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  the_thread->current_priority = priority;
  the_thread->real_priority = priority;
  the_thread->Start.initial_priority = priority;

  own_node = _Scheduler_Thread_get_own_node( the_thread );

#if defined(RTEMS_SMP)
  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      _Scheduler_Node_destroy( old_scheduler, own_node );
      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      _Scheduler_Node_initialize(
        new_scheduler,
        own_node,
        the_thread,
        priority
      );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  _Scheduler_Node_set_priority( own_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
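
/*
 * A minimal usage sketch (an illustrative addition, not from the original
 * header): look up a scheduler instance by identifier and move a thread to
 * it.  The scheduler_id, the_thread, and priority variables are hypothetical
 * and error handling is abbreviated:
 *
 *   const Scheduler_Control *scheduler;
 *   Status_Control           status;
 *
 *   if ( _Scheduler_Get_by_id( scheduler_id, &scheduler ) ) {
 *     status = _Scheduler_Set( scheduler, the_thread, priority );
 *
 *     if ( status == STATUS_RESOURCE_IN_USE ) {
 *       the thread owns resources or waits on a thread queue and
 *       cannot migrate
 *     }
 *   }
 */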

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */