source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 9bfad8c

5
Last change on this file since 9bfad8c was 9bfad8c, checked in by Sebastian Huber <sebastian.huber@…>, on 06/08/16 at 20:22:46

score: Add thread priority to scheduler nodes

The thread priority is manifest in two independent areas. One area is
the user visible thread priority along with a potential thread queue.
The other is the scheduler. Currently, a thread priority update via
_Thread_Change_priority() first updates the user visible thread priority
and the thread queue, then the scheduler is notified if necessary. The
priority is passed to the scheduler via a local variable. A generation
counter ensures that the scheduler discards out-of-date priorities.

This use of a local variable ties the update in these two areas close
together. For later enhancements and the OMIP locking protocol
implementation we need more flexibility. Add a thread priority
information block to Scheduler_Node and synchronize priority value
updates via a sequence lock on SMP configurations.

Update #2556.

  • Property mode set to 100644
File size: 39.5 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
5 *
6 * This inline file contains all of the inlined routines associated with
7 * the manipulation of the scheduler.
8 */
9
10/*
11 *  Copyright (C) 2010 Gedare Bloom.
12 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
13 *  Copyright (c) 2014, 2016 embedded brains GmbH
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.org/license/LICENSE.
18 */
19
20#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
21#define _RTEMS_SCORE_SCHEDULERIMPL_H
22
23#include <rtems/score/scheduler.h>
24#include <rtems/score/cpusetimpl.h>
25#include <rtems/score/smpimpl.h>
26#include <rtems/score/threadimpl.h>
27
28#ifdef __cplusplus
29extern "C" {
30#endif
31
32/**
33 * @addtogroup ScoreScheduler
34 */
35/**@{**/
36
37/**
38 *  @brief Initializes the scheduler to the policy chosen by the user.
39 *
40 *  This routine initializes the scheduler to the policy chosen by the user
41 *  through confdefs, or to the priority scheduler with ready chains by
42 *  default.
43 */
44void _Scheduler_Handler_initialization( void );
45
/**
 * @brief Gets the context of the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 *
 * @return The scheduler instance context.
 */
RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}
52
/**
 * @brief Gets the scheduler instance currently used by the thread.
 *
 * On SMP configurations this reads the thread's current scheduler
 * (Scheduler.control), which may differ from the thread's own scheduler, see
 * _Scheduler_Get_own().  On uni-processor configurations there is exactly one
 * scheduler instance.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
65
/**
 * @brief Gets the thread's own scheduler instance.
 *
 * On SMP configurations this reads Scheduler.own_control, which may differ
 * from the scheduler instance returned by _Scheduler_Get().  On uni-processor
 * configurations there is exactly one scheduler instance.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
78
/**
 * @brief Gets the scheduler instance assigned to the processor index.
 *
 * @param[in] cpu_index The processor index.  NOTE(review): no range check is
 *   performed here; callers are presumably expected to pass a valid index.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}
91
92RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
93  const Per_CPU_Control *cpu
94)
95{
96  uint32_t cpu_index = _Per_CPU_Get_index( cpu );
97
98  return _Scheduler_Get_by_CPU_index( cpu_index );
99}
100
/**
 * @brief Gets the thread's own scheduler node.
 *
 * On SMP configurations this is Scheduler.own_node; on uni-processor
 * configurations the thread has only the one node.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_own_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_node;
#else
  return the_thread->Scheduler.node;
#endif
}
111
#if defined(RTEMS_SMP)
/**
 * @brief Gets the thread currently using this scheduler node.
 *
 * @param[in] node The scheduler node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
  const Scheduler_Node *node
)
{
  return node->user;
}
#endif
120
121ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
122
/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  /*
   * There is currently one global scheduler lock; the scheduler parameter is
   * accepted for interface symmetry but intentionally unused.
   */
  (void) scheduler;
  _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
}
139
/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  /* One global scheduler lock; the scheduler parameter is unused, see
     _Scheduler_Acquire_critical(). */
  (void) scheduler;
  _ISR_lock_Release( &_Scheduler_Lock, lock_context );
}
156
157/**
158 * The preferred method to add a new scheduler is to define the jump table
159 * entries and add a case to the _Scheduler_Initialize routine.
160 *
161 * Generic scheduling implementations that rely on the ready queue only can
162 * be found in the _Scheduler_queue_XXX functions.
163 */
164
165/*
166 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
168 * system.  Then remote Schedulers may be accessible.  How to protect such
169 * accesses remains an open problem.
170 */
171
/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * The scheduler operation is invoked while holding the scheduler lock.
 *
 * @param[in] the_thread The thread which state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
192
193#if defined(RTEMS_SMP)
194typedef struct {
195  Thread_Control *needs_help;
196  Thread_Control *next_needs_help;
197} Scheduler_Ask_for_help_context ;
198
199RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
200  Resource_Node *resource_node,
201  void          *arg
202)
203{
204  bool done;
205  Scheduler_Ask_for_help_context *help_context = arg;
206  Thread_Control *previous_needs_help = help_context->needs_help;
207  Thread_Control *next_needs_help;
208  Thread_Control *offers_help =
209    THREAD_RESOURCE_NODE_TO_THREAD( resource_node );
210  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );
211
212  next_needs_help = ( *scheduler->Operations.ask_for_help )(
213    scheduler,
214    offers_help,
215    previous_needs_help
216  );
217
218  done = next_needs_help != previous_needs_help;
219
220  if ( done ) {
221    help_context->next_needs_help = next_needs_help;
222  }
223
224  return done;
225}
226
/**
 * @brief Ask threads depending on resources owned by the thread for help.
 *
 * A thread is in need for help if it lost its assigned processor due to
 * pre-emption by a higher priority thread or it was not possible to assign it
 * a processor since its priority is too low on its current scheduler instance.
 *
 * The run-time of this function depends on the size of the resource tree of
 * the thread needing help and other resource trees in case threads in need for
 * help are produced during this operation.
 *
 * @param[in] needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help(
  Thread_Control *needs_help
)
{
  do {
    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );

    /* First ask the thread's own scheduler instance directly. */
    needs_help = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      needs_help,
      needs_help
    );

    if ( needs_help != NULL ) {
      /* Then iterate the resource tree to find a thread that can help. */
      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };

      _Resource_Iterate(
        &needs_help->Resource_node,
        _Scheduler_Ask_for_help_visitor,
        &help_context
      );

      /* The iteration may produce a new thread in need for help. */
      needs_help = help_context.next_needs_help;
    }
  } while ( needs_help != NULL );
}
266
/**
 * @brief Asks for help if the thread needs it and owns resources.
 *
 * The help protocol is skipped if the thread's own node is an active rival
 * node already used by this very thread, since in this case the node is in
 * use and no help is necessary.
 *
 * @param[in] needs_help The thread possibly needing help, may be NULL.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  Thread_Control *needs_help
)
{
  if (
    needs_help != NULL
      && _Resource_Node_owns_resources( &needs_help->Resource_node )
  ) {
    Scheduler_Node *node = _Scheduler_Thread_get_own_node( needs_help );

    if (
      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
        || _Scheduler_Node_get_user( node ) != needs_help
    ) {
      _Scheduler_Ask_for_help( needs_help );
    }
  }
}
285#endif
286
/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * On SMP configurations the yield operation may return a thread in need for
 * help, which is then processed by the help protocol.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* The return value is only meaningful on SMP configurations. */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.yield )( scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
317
/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.block )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
340
/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* On SMP the unblock operation may yield a thread in need for help. */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.unblock )( scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
373
/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * Note that the thread's own scheduler instance is used here, not the current
 * one, see _Scheduler_Get_own().
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
  const Scheduler_Control *own_scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  own_scheduler = _Scheduler_Get_own( the_thread );
  _Scheduler_Acquire_critical( own_scheduler, &lock_context );

#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *own_scheduler->Operations.update_priority )( own_scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( own_scheduler, &lock_context );
}
410
/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}
431
/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * This is the inverse of _Scheduler_Map_priority() in [0, M], see there.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
447
/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread containing the scheduler node.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    the_thread,
    priority
  );
}
472
/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread containing the scheduler node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, the_thread );
}
489
/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control *the_thread,
  uint64_t        deadline
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  ( *scheduler->Operations.release_job )( scheduler, the_thread, deadline );
}
505
506/**
507 * @brief Scheduler method invoked at each clock tick.
508 *
509 * This method is invoked at each clock tick to allow the scheduler
510 * implementation to perform any activities required.  For the
511 * scheduler which support standard RTEMS features, this includes
512 * time-slicing management.
513 */
514RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
515{
516  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
517  Thread_Control *executing = cpu->executing;
518
519  if ( scheduler != NULL && executing != NULL ) {
520    ( *scheduler->Operations.tick )( scheduler, executing );
521  }
522}
523
/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}
541
#if defined(RTEMS_SMP)
/**
 * @brief Gets the processor to scheduler assignment of the processor index.
 *
 * @param[in] cpu_index The processor index.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

/**
 * @brief Returns true if the processor assignment is mandatory, otherwise
 * false.
 *
 * @param[in] assignment The processor to scheduler assignment.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

/**
 * @brief Returns true if the processor has a scheduler assigned and thus
 * should be started, otherwise false.
 *
 * @param[in] assignment The processor to scheduler assignment.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */
564
/**
 * @brief Returns true if the scheduler instance owns the processor, otherwise
 * false.
 *
 * On uni-processor configurations there is only one scheduler instance which
 * trivially owns the only processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpu_index The processor index.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}
582
/**
 * @brief Sets a new scheduler instance for the thread.
 *
 * On SMP configurations a ready thread is blocked first, then its scheduler
 * node is re-initialized for the new instance and finally the thread is
 * unblocked again.  The statement order in this function is significant.
 *
 * @param[in] scheduler The new scheduler instance.
 * @param[in] the_thread The thread.
 *
 * @retval true The scheduler instance was set (or was already set).
 * @retval false The thread owns resources and may not change its scheduler
 *   instance.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Set(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Control *current_scheduler;
  States_Control           current_state;

  current_scheduler = _Scheduler_Get( the_thread );

  /* Nothing to do if the thread already uses this scheduler instance. */
  if ( current_scheduler == scheduler ) {
    return true;
  }

  /* A resource owner may be part of a help protocol; refuse the change. */
  if ( _Thread_Owns_resources( the_thread ) ) {
    return false;
  }

  current_state = the_thread->current_state;

  if ( _States_Is_ready( current_state ) ) {
    _Scheduler_Block( the_thread );
  }

  _Scheduler_Node_destroy( current_scheduler, the_thread );
  the_thread->Scheduler.own_control = scheduler;
  the_thread->Scheduler.control = scheduler;
  _Scheduler_Node_initialize(
    scheduler,
    the_thread,
    the_thread->current_priority
  );

  if ( _States_Is_ready( current_state ) ) {
    _Scheduler_Unblock( the_thread );
  }

  return true;
#else
  (void) scheduler;
  return true;
#endif
}
627
628#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
629
/**
 * @brief Fills the processor set with the processors owned by the scheduler
 * instance.
 *
 * On uni-processor configurations the set contains all processors (exactly
 * one is iterated here).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpusetsize Size of the processor set in bytes.
 * @param[out] cpuset The processor set to fill.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
653
/**
 * @brief Default get affinity operation: the affinity is the processor set of
 * the scheduler instance, independent of the thread.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread (unused by the default implementation).
 * @param[in] cpusetsize Size of the processor set in bytes.
 * @param[out] cpuset The resulting processor set.
 *
 * @retval true Always.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}
667
668bool _Scheduler_Get_affinity(
669  Thread_Control *the_thread,
670  size_t          cpusetsize,
671  cpu_set_t      *cpuset
672);
673
674RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
675  const Scheduler_Control *scheduler,
676  Thread_Control          *the_thread,
677  size_t                   cpusetsize,
678  const cpu_set_t         *cpuset
679)
680{
681  uint32_t cpu_count = _SMP_Get_processor_count();
682  uint32_t cpu_index;
683  bool     ok = true;
684
685  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
686#if defined(RTEMS_SMP)
687    const Scheduler_Control *scheduler_of_cpu =
688      _Scheduler_Get_by_CPU_index( cpu_index );
689
690    ok = ok
691      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
692        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
693          && scheduler != scheduler_of_cpu ) );
694#else
695    (void) scheduler;
696
697    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
698#endif
699  }
700
701  return ok;
702}
703
704bool _Scheduler_Set_affinity(
705  Thread_Control  *the_thread,
706  size_t           cpusetsize,
707  const cpu_set_t *cpuset
708);
709
710#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
711
/**
 * @brief Generic block operation: extract the thread and re-schedule if it
 * was the executing or heir thread.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread to block.
 * @param[in] extract Operation to remove the thread from the ready queue.
 * @param[in] schedule Operation to select a new heir; the bool argument is
 *   passed as true (force dispatch).
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control * ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool )
)
{
  ( *extract )( scheduler, the_thread );

  /* TODO: flash critical section? */

  /* Only re-schedule if removing this thread changes the scheduled set. */
  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
732
/**
 * @brief Gets the count of processors owned by the scheduler instance.
 *
 * On uni-processor configurations this is always one.
 *
 * @param[in] scheduler The scheduler instance.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}
745
/**
 * @brief Builds an object identifier for the scheduler index.
 *
 * The index is biased by one since object index zero is not a valid local
 * object index.
 *
 * @param[in] scheduler_index The scheduler table index.
 */
RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    scheduler_index + 1
  );
}
755
756RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
757{
758  uint32_t minimum_id = _Scheduler_Build_id( 0 );
759
760  return id - minimum_id;
761}
762
/**
 * @brief Gets the scheduler instance of the scheduler identifier.
 *
 * @param[in] id The scheduler identifier.
 * @param[out] scheduler_p Receives the scheduler instance pointer.  This is
 *   written even for invalid identifiers, so it must only be used if true is
 *   returned.
 *
 * @retval true The identifier is valid and the instance owns processors.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}
776
777RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
778{
779  const Scheduler_Control *scheduler;
780  bool ok = _Scheduler_Get_by_id( id, &scheduler );
781
782  (void) scheduler;
783
784  return ok;
785}
786
/**
 * @brief Gets the scheduler table index of the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance; must point into
 *   _Scheduler_Table.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}
793
/**
 * @brief Gets the scheduler node currently used by the thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
  return the_thread->Scheduler.node;
}
800
/**
 * @brief Initializes the scheduler-independent part of a scheduler node.
 *
 * On SMP configurations the thread initially owns, uses and accepts help via
 * its own node, and the priority sequence lock is initialized.
 *
 * @param[out] node The scheduler node to initialize.
 * @param[in] the_thread The thread owning the scheduler node.
 * @param[in] priority The initial thread priority in the scheduler domain.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
  Scheduler_Node   *node,
  Thread_Control   *the_thread,
  Priority_Control  priority
)
{
  node->Priority.value = priority;
  node->Priority.prepend_it = false;

#if defined(RTEMS_SMP)
  node->user = the_thread;
  node->help_state = SCHEDULER_HELP_YOURSELF;
  node->owner = the_thread;
  node->idle = NULL;
  node->accepts_help = the_thread;
  _SMP_sequence_lock_Initialize( &node->Priority.Lock );
#else
  (void) the_thread;
#endif
}
821
/**
 * @brief Gets the priority information of the scheduler node.
 *
 * On SMP configurations the value and prepend flag are read consistently via
 * the sequence lock reader protocol: the read is retried until it was not
 * interleaved with a writer, see _Scheduler_Node_set_priority().
 *
 * @param[in] node The scheduler node.
 * @param[out] prepend_it_p Receives the prepend flag.
 *
 * @return The priority value of the scheduler node.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Node_get_priority(
  Scheduler_Node *node,
  bool           *prepend_it_p
)
{
  Priority_Control priority;
  bool             prepend_it;

#if defined(RTEMS_SMP)
  unsigned int     seq;

  do {
    seq = _SMP_sequence_lock_Read_begin( &node->Priority.Lock );
#endif

    priority = node->Priority.value;
    prepend_it = node->Priority.prepend_it;

#if defined(RTEMS_SMP)
  } while ( _SMP_sequence_lock_Read_retry( &node->Priority.Lock, seq ) );
#endif

  *prepend_it_p = prepend_it;

  return priority;
}
848
/**
 * @brief Sets the priority information of the scheduler node.
 *
 * On SMP configurations the update is guarded by the sequence lock writer
 * protocol so that concurrent readers retry, see
 * _Scheduler_Node_get_priority().
 *
 * @param[in] node The scheduler node.
 * @param[in] new_priority The new priority value.
 * @param[in] prepend_it The new prepend flag.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_priority(
  Scheduler_Node   *node,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
#if defined(RTEMS_SMP)
  unsigned int seq;

  seq = _SMP_sequence_lock_Write_begin( &node->Priority.Lock );
#endif

  node->Priority.value = new_priority;
  node->Priority.prepend_it = prepend_it;

#if defined(RTEMS_SMP)
  _SMP_sequence_lock_Write_end( &node->Priority.Lock, seq );
#endif
}
868
869#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);
892
/**
 * @brief Gets the thread owning this scheduler node.
 *
 * @param[in] node The scheduler node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
  const Scheduler_Node *node
)
{
  return node->owner;
}
899
/**
 * @brief Gets the idle thread of this scheduler node, may be NULL.
 *
 * @param[in] node The scheduler node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
  const Scheduler_Node *node
)
{
  return node->idle;
}
906
/**
 * @brief Sets the thread using this scheduler node.
 *
 * @param[in] node The scheduler node.
 * @param[in] user The new user thread of the node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user(
  Scheduler_Node *node,
  Thread_Control *user
)
{
  node->user = user;
}
914
/**
 * @brief Sets the scheduler node currently used by the thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] node The scheduler node to use.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  the_thread->Scheduler.node = node;
}
922
/**
 * @brief Lets the thread use the scheduler node and instance of a previous
 * node user.
 *
 * The scheduler instance is the own scheduler instance of the previous user
 * of the node.
 *
 * @param[in] the_thread The thread taking over the node.
 * @param[in] node The scheduler node.
 * @param[in] previous_user_of_node The thread previously using the node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}
935
936extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];
937
/**
 * @brief Changes the thread scheduler state.
 *
 * In debug configurations the transition is validated against the table of
 * allowed state changes.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_state The new thread scheduler state.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );

  the_thread->Scheduler.state = new_state;
}
950
/**
 * @brief Changes the scheduler help state of a thread.
 *
 * The help state lives in the thread's own scheduler node, see
 * _Scheduler_Thread_get_own_node().
 *
 * @param[in] the_thread The thread.
 * @param[in] new_help_state The new help state.
 *
 * @return The previous help state.
 */
RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
  Thread_Control       *the_thread,
  Scheduler_Help_state  new_help_state
)
{
  Scheduler_Node *node = _Scheduler_Thread_get_own_node( the_thread );
  Scheduler_Help_state previous_help_state = node->help_state;

  node->help_state = new_help_state;

  return previous_help_state;
}
971
972/**
973 * @brief Changes the resource tree root of a thread.
974 *
975 * For each node of the resource sub-tree specified by the top thread the
976 * scheduler asks for help.  So the root thread gains access to all scheduler
977 * nodes corresponding to the resource sub-tree.  In case a thread previously
978 * granted help is displaced by this operation, then the scheduler asks for
979 * help using its remaining resource tree.
980 *
981 * The run-time of this function depends on the size of the resource sub-tree
982 * and other resource trees in case threads in need for help are produced
983 * during this operation.
984 *
985 * @param[in] top The thread specifying the resource sub-tree top.
986 * @param[in] root The thread specifying the new resource sub-tree root.
987 */
988void _Scheduler_Thread_change_resource_root(
989  Thread_Control *top,
990  Thread_Control *root
991);
992
/**
 * @brief Installs the idle thread as the user of the scheduler node.
 *
 * The node must be an active owner or rival node, must not already have an
 * idle thread, and its owner must currently be its user (asserted in debug
 * configurations).
 *
 * @param[in] node The scheduler node.
 * @param[in] idle The idle thread to install.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}
1012
/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
 * helping state may use an idle thread for the scheduler node owned by itself
 * in case it executes currently using another scheduler node or in case it is
 * in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The idle thread now used by the node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );

  return idle;
}
1037
/**
 * @brief Actions returned by _Scheduler_Try_to_schedule_node().
 */
typedef enum {
  /** Schedule the node. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  /** Exchange the idle thread of the victim node with this node. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  /** Block the node. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;
1043
1044/**
1045 * @brief Try to schedule this scheduler node.
1046 *
1047 * @param[in] context The scheduler instance context.
1048 * @param[in] node The node which wants to get scheduled.
1049 * @param[in] idle A potential idle thread used by a potential victim node.
1050 * @param[in] get_idle_thread Function to get an idle thread.
1051 *
1052 * @retval true This node can be scheduled.
1053 * @retval false Otherwise.
1054 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Scheduler_Try_to_schedule_action action;
  Thread_Control *owner;
  Thread_Control *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;

  /* Without the help protocol the node can always be scheduled. */
  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );
  user = _Scheduler_Node_get_user( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      /* The ready user takes over the node and its scheduler instance. */
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      if ( idle != NULL ) {
        /* Reuse the idle thread of the victim node. */
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      /* Fall back to the owner as the node user. */
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    /* Passive nodes are only schedulable with a ready user. */
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  return action;
}
1108
1109/**
1110 * @brief Release an idle thread using this scheduler node.
1111 *
1112 * @param[in] context The scheduler instance context.
1113 * @param[in] node The node which may have an idle thread as user.
1114 * @param[in] release_idle_thread Function to release an idle thread.
1115 *
1116 * @retval idle The idle thread which used this node.
1117 * @retval NULL This node had no idle thread as an user.
1118 */
1119RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
1120  Scheduler_Context             *context,
1121  Scheduler_Node                *node,
1122  Scheduler_Release_idle_thread  release_idle_thread
1123)
1124{
1125  Thread_Control *idle = _Scheduler_Node_get_idle( node );
1126
1127  if ( idle != NULL ) {
1128    Thread_Control *owner = _Scheduler_Node_get_owner( node );
1129
1130    node->idle = NULL;
1131    _Scheduler_Node_set_user( node, owner );
1132    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
1133    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );
1134
1135    ( *release_idle_thread )( context, idle );
1136  }
1137
1138  return idle;
1139}
1140
1141RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
1142  Scheduler_Node *needs_idle,
1143  Scheduler_Node *uses_idle,
1144  Thread_Control *idle
1145)
1146{
1147  uses_idle->idle = NULL;
1148  _Scheduler_Node_set_user(
1149    uses_idle,
1150    _Scheduler_Node_get_owner( uses_idle )
1151  );
1152  _Scheduler_Set_idle_thread( needs_idle, idle );
1153}
1154
1155/**
1156 * @brief Block this scheduler node.
1157 *
1158 * @param[in] context The scheduler instance context.
1159 * @param[in] thread The thread which wants to get blocked referencing this
1160 *   node.  This is not necessarily the user of this node in case the node
1161 *   participates in the scheduler helping protocol.
1162 * @param[in] node The node which wants to get blocked.
1163 * @param[in] is_scheduled This node is scheduled.
1164 * @param[in] get_idle_thread Function to get an idle thread.
1165 *
1166 * @retval true Continue with the blocking operation.
1167 * @retval false Otherwise.
1168 */
1169RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
1170  Scheduler_Context         *context,
1171  Thread_Control            *thread,
1172  Scheduler_Node            *node,
1173  bool                       is_scheduled,
1174  Scheduler_Get_idle_thread  get_idle_thread
1175)
1176{
1177  Thread_Control *old_user;
1178  Thread_Control *new_user;
1179
1180  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
1181
1182  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
1183    _Assert( thread == _Scheduler_Node_get_user( node ) );
1184
1185    return true;
1186  }
1187
1188  new_user = NULL;
1189
1190  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
1191    if ( is_scheduled ) {
1192      _Assert( thread == _Scheduler_Node_get_user( node ) );
1193      old_user = thread;
1194      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
1195    }
1196  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
1197    if ( is_scheduled ) {
1198      old_user = _Scheduler_Node_get_user( node );
1199
1200      if ( thread == old_user ) {
1201        Thread_Control *owner = _Scheduler_Node_get_owner( node );
1202
1203        if (
1204          thread != owner
1205            && owner->Scheduler.state == THREAD_SCHEDULER_READY
1206        ) {
1207          new_user = owner;
1208          _Scheduler_Node_set_user( node, new_user );
1209        } else {
1210          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
1211        }
1212      }
1213    }
1214  } else {
1215    /* Not implemented, this is part of the OMIP support path. */
1216    _Assert(0);
1217  }
1218
1219  if ( new_user != NULL ) {
1220    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
1221
1222    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
1223    _Thread_Set_CPU( new_user, cpu );
1224    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
1225  }
1226
1227  return false;
1228}
1229
1230/**
1231 * @brief Unblock this scheduler node.
1232 *
1233 * @param[in] context The scheduler instance context.
1234 * @param[in] the_thread The thread which wants to get unblocked.
1235 * @param[in] node The node which wants to get unblocked.
1236 * @param[in] is_scheduled This node is scheduled.
1237 * @param[in] release_idle_thread Function to release an idle thread.
1238 *
1239 * @retval true Continue with the unblocking operation.
1240 * @retval false Otherwise.
1241 */
1242RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
1243  Scheduler_Context             *context,
1244  Thread_Control                *the_thread,
1245  Scheduler_Node                *node,
1246  bool                           is_scheduled,
1247  Scheduler_Release_idle_thread  release_idle_thread
1248)
1249{
1250  bool unblock;
1251
1252  if ( is_scheduled ) {
1253    Thread_Control *old_user = _Scheduler_Node_get_user( node );
1254    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
1255    Thread_Control *idle = _Scheduler_Release_idle_thread(
1256      context,
1257      node,
1258      release_idle_thread
1259    );
1260    Thread_Control *owner = _Scheduler_Node_get_owner( node );
1261    Thread_Control *new_user;
1262
1263    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
1264      _Assert( idle != NULL );
1265      new_user = the_thread;
1266    } else if ( idle != NULL ) {
1267      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
1268      new_user = the_thread;
1269    } else if ( the_thread != owner ) {
1270      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
1271      _Assert( old_user != the_thread );
1272      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
1273      new_user = the_thread;
1274      _Scheduler_Node_set_user( node, new_user );
1275    } else {
1276      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
1277      _Assert( old_user != the_thread );
1278      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
1279      new_user = NULL;
1280    }
1281
1282    if ( new_user != NULL ) {
1283      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
1284      _Thread_Set_CPU( new_user, cpu );
1285      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
1286    }
1287
1288    unblock = false;
1289  } else {
1290    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
1291
1292    unblock = true;
1293  }
1294
1295  return unblock;
1296}
1297
1298/**
1299 * @brief Asks a ready scheduler node for help.
1300 *
1301 * @param[in] node The ready node offering help.
1302 * @param[in] needs_help The thread needing help.
1303 *
1304 * @retval needs_help The thread needing help.
1305 */
1306RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
1307  Scheduler_Node *node,
1308  Thread_Control *needs_help
1309)
1310{
1311  _Scheduler_Node_set_user( node, needs_help );
1312
1313  return needs_help;
1314}
1315
1316/**
1317 * @brief Asks a scheduled scheduler node for help.
1318 *
1319 * @param[in] context The scheduler instance context.
1320 * @param[in] node The scheduled node offering help.
1321 * @param[in] offers_help The thread offering help.
1322 * @param[in] needs_help The thread needing help.
1323 * @param[in] previous_accepts_help The previous thread accepting help by this
1324 *   scheduler node.
1325 * @param[in] release_idle_thread Function to release an idle thread.
1326 *
1327 * @retval needs_help The previous thread accepting help by this scheduler node
1328 *   which was displaced by the thread needing help.
1329 * @retval NULL There are no more threads needing help.
1330 */
1331RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
1332  Scheduler_Context             *context,
1333  Scheduler_Node                *node,
1334  Thread_Control                *offers_help,
1335  Thread_Control                *needs_help,
1336  Thread_Control                *previous_accepts_help,
1337  Scheduler_Release_idle_thread  release_idle_thread
1338)
1339{
1340  Thread_Control *next_needs_help = NULL;
1341  Thread_Control *old_user = NULL;
1342  Thread_Control *new_user = NULL;
1343
1344  if (
1345    previous_accepts_help != needs_help
1346      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
1347  ) {
1348    Thread_Control *idle = _Scheduler_Release_idle_thread(
1349      context,
1350      node,
1351      release_idle_thread
1352    );
1353
1354    if ( idle != NULL ) {
1355      old_user = idle;
1356    } else {
1357      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
1358      old_user = previous_accepts_help;
1359    }
1360
1361    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1362      new_user = needs_help;
1363    } else {
1364      _Assert(
1365        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
1366          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
1367      );
1368      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );
1369
1370      new_user = offers_help;
1371    }
1372
1373    if ( previous_accepts_help != offers_help ) {
1374      next_needs_help = previous_accepts_help;
1375    }
1376  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1377    Thread_Control *idle = _Scheduler_Release_idle_thread(
1378      context,
1379      node,
1380      release_idle_thread
1381    );
1382
1383    if ( idle != NULL ) {
1384      old_user = idle;
1385    } else {
1386      old_user = _Scheduler_Node_get_user( node );
1387    }
1388
1389    new_user = needs_help;
1390  } else {
1391    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
1392  }
1393
1394  if ( new_user != old_user ) {
1395    Per_CPU_Control *cpu_self = _Per_CPU_Get();
1396    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
1397
1398    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
1399    _Scheduler_Thread_set_scheduler_and_node(
1400      old_user,
1401      _Scheduler_Thread_get_own_node( old_user ),
1402      old_user
1403    );
1404
1405    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
1406    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );
1407
1408    _Scheduler_Node_set_user( node, new_user );
1409    _Thread_Set_CPU( new_user, cpu );
1410    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
1411  }
1412
1413  return next_needs_help;
1414}
1415
1416/**
1417 * @brief Asks a blocked scheduler node for help.
1418 *
1419 * @param[in] context The scheduler instance context.
1420 * @param[in] node The scheduled node offering help.
1421 * @param[in] offers_help The thread offering help.
1422 * @param[in] needs_help The thread needing help.
1423 *
1424 * @retval true Enqueue this scheduler node.
1425 * @retval false Otherwise.
1426 */
1427RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
1428  Scheduler_Context *context,
1429  Scheduler_Node    *node,
1430  Thread_Control    *offers_help,
1431  Thread_Control    *needs_help
1432)
1433{
1434  bool enqueue;
1435
1436  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
1437
1438  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1439    _Scheduler_Node_set_user( node, needs_help );
1440    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );
1441
1442    enqueue = true;
1443  } else {
1444    enqueue = false;
1445  }
1446
1447  return enqueue;
1448}
1449#endif
1450
1451RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
1452  Thread_Control *new_heir,
1453  bool            force_dispatch
1454)
1455{
1456  Thread_Control *heir = _Thread_Heir;
1457
1458  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
1459#if defined(RTEMS_SMP)
1460    /*
1461     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
1462     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
1463     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
1464     * schedulers.
1465     */
1466    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
1467    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1468#endif
1469    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
1470    _Thread_Heir = new_heir;
1471    _Thread_Dispatch_necessary = true;
1472  }
1473}
1474
1475/** @} */
1476
1477#ifdef __cplusplus
1478}
1479#endif
1480
1481#endif
1482/* end of include file */
Note: See TracBrowser for help on using the repository browser.