source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ edb020c

Last change on this file since edb020c was edb020c, checked in by Sebastian Huber <sebastian.huber@…>, on Oct 14, 2016 at 11:03:46 AM

score: Protect thread CPU by thread scheduler lock

Update #2556.

  • Property mode set to 100644
File size: 41.8 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
5 *
6 * This inline file contains all of the inlined routines associated with
7 * the manipulation of the scheduler.
8 */
9
10/*
11 *  Copyright (C) 2010 Gedare Bloom.
12 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
13 *  Copyright (c) 2014, 2016 embedded brains GmbH
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.org/license/LICENSE.
18 */
19
20#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
21#define _RTEMS_SCORE_SCHEDULERIMPL_H
22
23#include <rtems/score/scheduler.h>
24#include <rtems/score/assert.h>
25#include <rtems/score/cpusetimpl.h>
26#include <rtems/score/priorityimpl.h>
27#include <rtems/score/smpimpl.h>
28#include <rtems/score/status.h>
29#include <rtems/score/threadimpl.h>
30
31#ifdef __cplusplus
32extern "C" {
33#endif
34
35/**
36 * @addtogroup ScoreScheduler
37 */
38/**@{**/
39
40/**
41 *  @brief Initializes the scheduler to the policy chosen by the user.
42 *
43 *  This routine initializes the scheduler to the policy chosen by the user
44 *  through confdefs, or to the priority scheduler with ready chains by
45 *  default.
46 */
47void _Scheduler_Handler_initialization( void );
48
/**
 * @brief Gets the context of the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 *
 * @return The context of the scheduler instance.
 */
RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}
55
/**
 * @brief Gets the scheduler instance of the thread.
 *
 * @param[in] the_thread The thread.
 *
 * @return The scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  /* Uni-processor configurations have exactly one scheduler instance */
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
68
/**
 * @brief Gets the own scheduler instance of the thread.
 *
 * On SMP configurations this is the scheduler instance of the thread's own
 * scheduler node, which may differ from _Scheduler_Get() while the thread
 * uses a borrowed node.
 *
 * @param[in] the_thread The thread.
 *
 * @return The own scheduler instance of the thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  /* Uni-processor configurations have exactly one scheduler instance */
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
81
/**
 * @brief Gets the scheduler instance assigned to the processor index.
 *
 * @param[in] cpu_index The processor index.
 *
 * @return The scheduler instance assigned to this processor.  On SMP
 *   configurations this may be NULL in case no scheduler instance is
 *   assigned to the processor.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  /* Uni-processor configurations have exactly one scheduler instance */
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}
94
95RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
96  const Per_CPU_Control *cpu
97)
98{
99  uint32_t cpu_index = _Per_CPU_Get_index( cpu );
100
101  return _Scheduler_Get_by_CPU_index( cpu_index );
102}
103
104ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
105
/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  /* Currently all scheduler instances share the one global _Scheduler_Lock */
  (void) scheduler;
  _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
}
122
/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  /* Currently all scheduler instances share the one global _Scheduler_Lock */
  (void) scheduler;
  _ISR_lock_Release( &_Scheduler_Lock, lock_context );
}
139
/**
 * @brief Gets the scheduler node of the thread.
 *
 * @param[in] the_thread The thread.
 *
 * @return The scheduler node of the thread.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.node;
#else
  return the_thread->Scheduler.nodes;
#endif
}
150
151/**
152 * The preferred method to add a new scheduler is to define the jump table
153 * entries and add a case to the _Scheduler_Initialize routine.
154 *
155 * Generic scheduling implementations that rely on the ready queue only can
156 * be found in the _Scheduler_queue_XXX functions.
157 */
158
/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */
165
166/**
167 * @brief General scheduling decision.
168 *
169 * This kernel routine implements the scheduling decision logic for
170 * the scheduler. It does NOT dispatch.
171 *
172 * @param[in] the_thread The thread which state changed previously.
173 */
174RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
175{
176  const Scheduler_Control *scheduler;
177  ISR_lock_Context         lock_context;
178
179  scheduler = _Scheduler_Get( the_thread );
180  _Scheduler_Acquire_critical( scheduler, &lock_context );
181
182  ( *scheduler->Operations.schedule )( scheduler, the_thread );
183
184  _Scheduler_Release_critical( scheduler, &lock_context );
185}
186
187#if defined(RTEMS_SMP)
/* Iteration state for _Scheduler_Ask_for_help_X() */
typedef struct {
  Thread_Control *needs_help;
  Thread_Control *next_needs_help;
} Scheduler_Ask_for_help_context ;

/**
 * @brief Resource iteration visitor which asks the thread owning the resource
 * node for help.
 *
 * @param[in] resource_node The resource node of a thread which may offer help.
 * @param[in] arg Pointer to a Scheduler_Ask_for_help_context.
 *
 * @retval true The ask for help operation produced a new thread in need of
 *   help, recorded in the context.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
  Resource_Node *resource_node,
  void          *arg
)
{
  bool done;
  Scheduler_Ask_for_help_context *help_context = arg;
  Thread_Control *previous_needs_help = help_context->needs_help;
  Thread_Control *next_needs_help;
  Thread_Control *offers_help =
    THREAD_RESOURCE_NODE_TO_THREAD( resource_node );
  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );

  next_needs_help = ( *scheduler->Operations.ask_for_help_X )(
    scheduler,
    offers_help,
    previous_needs_help
  );

  /* Progress was made if the operation yields a different thread */
  done = next_needs_help != previous_needs_help;

  if ( done ) {
    help_context->next_needs_help = next_needs_help;
  }

  return done;
}
220
/**
 * @brief Ask threads depending on resources owned by the thread for help.
 *
 * A thread is in need for help if it lost its assigned processor due to
 * pre-emption by a higher priority thread or it was not possible to assign it
 * a processor since its priority is too low on its current scheduler instance.
 *
 * The run-time of this function depends on the size of the resource tree of
 * the thread needing help and other resource trees in case threads in need for
 * help are produced during this operation.
 *
 * @param[in] needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_X(
  Thread_Control *needs_help
)
{
  do {
    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );

    needs_help = ( *scheduler->Operations.ask_for_help_X )(
      scheduler,
      needs_help,
      needs_help
    );

    if ( needs_help != NULL ) {
      /* Ask the threads owning resources in the tree of this thread; the
         visitor may report another thread in need of help */
      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };

      _Resource_Iterate(
        &needs_help->Resource_node,
        _Scheduler_Ask_for_help_visitor,
        &help_context
      );

      needs_help = help_context.next_needs_help;
    }
  } while ( needs_help != NULL );
}
260
/**
 * @brief Asks for help in case the thread needs it and owns resources.
 *
 * The help request is skipped if the thread is already the active rival user
 * of its own scheduler node.
 *
 * @param[in] needs_help The thread which may need help, may be NULL.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  Thread_Control *needs_help
)
{
  if (
    needs_help != NULL
      && _Resource_Node_owns_resources( &needs_help->Resource_node )
  ) {
    Scheduler_Node *node = _Thread_Scheduler_get_own_node( needs_help );

    if (
      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
        || _Scheduler_Node_get_user( node ) != needs_help
    ) {
      _Scheduler_Ask_for_help_X( needs_help );
    }
  }
}
279#endif
280
/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* On SMP configurations the operation may return a thread in need of help */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
315
/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
342
/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* On SMP configurations the operation may return a thread in need of help */
#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
379
/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
  const Scheduler_Control *own_scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  /* The priority update is carried out on the own scheduler instance */
  own_scheduler = _Scheduler_Get_own( the_thread );
  _Scheduler_Acquire_critical( own_scheduler, &lock_context );

#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *own_scheduler->Operations.update_priority )(
    own_scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( own_scheduler, &lock_context );
}
420
/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 *
 * @see _Scheduler_Unmap_priority().
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}
441
/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 *
 * @see _Scheduler_Map_priority().
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
457
/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 *
 * @see _Scheduler_Node_destroy().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}
485
/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 *
 * @see _Scheduler_Node_initialize().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
502
/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  /* Start with an empty set of priority updates for the operation to fill */
  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}
530
/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  /* Start with an empty set of priority updates for the operation to fill */
  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}
555
/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For the
 * scheduler which support standard RTEMS features, this includes
 * time-slicing management.
 *
 * @param[in] cpu The processor which received the clock tick.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  /* A processor may lack a scheduler instance assignment or an executing
     thread */
  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}
573
/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  /* Delegate to the scheduler specific start idle operation */
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}
591
592#if defined(RTEMS_SMP)
/**
 * @brief Gets the scheduler assignment of the processor index.
 *
 * @param[in] cpu_index The processor index.
 *
 * @return The scheduler assignment of the processor.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}
599
/**
 * @brief Checks if the processor assignment is mandatory.
 *
 * @param[in] assignment The scheduler assignment of a processor.
 *
 * @retval true The SCHEDULER_ASSIGN_PROCESSOR_MANDATORY attribute is set.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}
606
/**
 * @brief Checks if the processor should be started.
 *
 * @param[in] assignment The scheduler assignment of a processor.
 *
 * @retval true A scheduler instance is assigned to the processor.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
613#endif /* defined(RTEMS_SMP) */
614
/**
 * @brief Checks if the scheduler instance owns the processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpu_index The processor index.
 *
 * @retval true The processor is assigned to the scheduler instance.  On
 *   uni-processor configurations this is always the case.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}
632
633#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
634
/**
 * @brief Gets the set of processors owned by the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpusetsize Size of the processor set in bytes.
 * @param[out] cpuset The processor set to fill.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    /* Uni-processor: the one scheduler instance owns every processor */
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
658
/**
 * @brief Default get affinity operation: the affinity is the processor set
 * owned by the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread, ignored by this default implementation.
 * @param[in] cpusetsize Size of the processor set in bytes.
 * @param[out] cpuset The processor set.
 *
 * @retval true Always.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}
672
673bool _Scheduler_Get_affinity(
674  Thread_Control *the_thread,
675  size_t          cpusetsize,
676  cpu_set_t      *cpuset
677);
678
679RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
680  const Scheduler_Control *scheduler,
681  Thread_Control          *the_thread,
682  size_t                   cpusetsize,
683  const cpu_set_t         *cpuset
684)
685{
686  uint32_t cpu_count = _SMP_Get_processor_count();
687  uint32_t cpu_index;
688  bool     ok = true;
689
690  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
691#if defined(RTEMS_SMP)
692    const Scheduler_Control *scheduler_of_cpu =
693      _Scheduler_Get_by_CPU_index( cpu_index );
694
695    ok = ok
696      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
697        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
698          && scheduler != scheduler_of_cpu ) );
699#else
700    (void) scheduler;
701
702    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
703#endif
704  }
705
706  return ok;
707}
708
709bool _Scheduler_Set_affinity(
710  Thread_Control  *the_thread,
711  size_t           cpusetsize,
712  const cpu_set_t *cpuset
713);
714
715#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
716
/**
 * @brief Generic block operation: extract the thread and force a scheduling
 * decision in case the thread is the executing or heir thread.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread to block.
 * @param[in] node The scheduler node of the thread.
 * @param[in] extract The scheduler specific extract operation.
 * @param[in] schedule The scheduler specific schedule operation, invoked with
 *   a true third argument.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
741
/**
 * @brief Gets the count of processors of the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 *
 * @return On SMP configurations the processor count of the scheduler context,
 *   otherwise one.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}
754
/**
 * @brief Builds the object id of the scheduler with the given table index.
 *
 * The object index part is the scheduler index plus one.
 *
 * @param[in] scheduler_index The index into the scheduler table.
 *
 * @return The object id of the scheduler.
 */
RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}
764
765RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
766{
767  uint32_t minimum_id = _Scheduler_Build_id( 0 );
768
769  return id - minimum_id;
770}
771
772RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
773  Objects_Id                id,
774  const Scheduler_Control **scheduler_p
775)
776{
777  uint32_t index = _Scheduler_Get_index_by_id( id );
778  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];
779
780  *scheduler_p = scheduler;
781
782  return index < _Scheduler_Count
783    && _Scheduler_Get_processor_count( scheduler ) > 0;
784}
785
786RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
787{
788  const Scheduler_Control *scheduler;
789  bool ok = _Scheduler_Get_by_id( id, &scheduler );
790
791  (void) scheduler;
792
793  return ok;
794}
795
796RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
797  const Scheduler_Control *scheduler
798)
799{
800  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
801}
802
/**
 * @brief Sets a new priority in the own scheduler node of the thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority.
 * @param[in] prepend_it Passed through to _Scheduler_Node_set_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}
814
815#if defined(RTEMS_SMP)
816/**
817 * @brief Gets an idle thread from the scheduler instance.
818 *
819 * @param[in] context The scheduler instance context.
820 *
821 * @retval idle An idle thread for use.  This function must always return an
822 * idle thread.  If none is available, then this is a fatal error.
823 */
824typedef Thread_Control *( *Scheduler_Get_idle_thread )(
825  Scheduler_Context *context
826);
827
828/**
829 * @brief Releases an idle thread to the scheduler instance for reuse.
830 *
831 * @param[in] context The scheduler instance context.
832 * @param[in] idle The idle thread to release
833 */
834typedef void ( *Scheduler_Release_idle_thread )(
835  Scheduler_Context *context,
836  Thread_Control    *idle
837);
838
/**
 * @brief Sets the scheduler node currently used by the thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] node The scheduler node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  the_thread->Scheduler.node = node;
}
846
/**
 * @brief Sets the scheduler instance and scheduler node of the thread.
 *
 * The scheduler instance is the own scheduler of the previous user of the
 * node.
 *
 * @param[in] the_thread The thread.
 * @param[in] node The scheduler node.
 * @param[in] previous_user_of_node The previous user of the scheduler node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}
859
860extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];
861
/**
 * @brief Changes the thread scheduler state to the new state.
 *
 * The caller must own the thread scheduler lock, except while the thread is
 * blocked or the system is not up yet (see the second assertion).  Only the
 * state transitions permitted by
 * _Scheduler_Thread_state_valid_state_changes[][] are allowed.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_state The new thread scheduler state.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}
879
880/**
881 * @brief Changes the scheduler help state of a thread.
882 *
883 * @param[in] the_thread The thread.
884 * @param[in] new_help_state The new help state.
885 *
886 * @return The previous help state.
887 */
888RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
889  Thread_Control       *the_thread,
890  Scheduler_Help_state  new_help_state
891)
892{
893  Scheduler_Node *node = _Thread_Scheduler_get_own_node( the_thread );
894  Scheduler_Help_state previous_help_state = node->help_state;
895
896  node->help_state = new_help_state;
897
898  return previous_help_state;
899}
900
901/**
902 * @brief Changes the resource tree root of a thread.
903 *
904 * For each node of the resource sub-tree specified by the top thread the
905 * scheduler asks for help.  So the root thread gains access to all scheduler
906 * nodes corresponding to the resource sub-tree.  In case a thread previously
907 * granted help is displaced by this operation, then the scheduler asks for
908 * help using its remaining resource tree.
909 *
910 * The run-time of this function depends on the size of the resource sub-tree
911 * and other resource trees in case threads in need for help are produced
912 * during this operation.
913 *
914 * @param[in] top The thread specifying the resource sub-tree top.
915 * @param[in] root The thread specifying the new resource sub-tree root.
916 */
917void _Scheduler_Thread_change_resource_root(
918  Thread_Control *top,
919  Thread_Control *root
920);
921
/**
 * @brief Makes the idle thread the user of the scheduler node.
 *
 * The node must use the active owner or active rival help protocol, must not
 * already have an idle thread, and its owner must be its current user (see
 * the assertions).
 *
 * @param[in] node The scheduler node.
 * @param[in] idle The idle thread to use for the node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}
941
/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
 * helping state may use an idle thread for the scheduler node owned by itself
 * in case it executes currently using another scheduler node or in case it is
 * in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The idle thread which now uses the node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );

  return idle;
}
966
967typedef enum {
968  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
969  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
970  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
971} Scheduler_Try_to_schedule_action;
972
/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE The node can be scheduled.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE The node can be
 *   scheduled once the provided idle thread is exchanged.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK The node cannot be scheduled.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;
  Thread_Control                   *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  user = _Scheduler_Node_get_user( node );

  _Thread_Scheduler_acquire_critical( user, &lock_context );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    /* No help protocol in use: simply schedule the user of the node */
    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Scheduler_release_critical( user, &lock_context );
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      /* Occupy the node with an idle thread while the owner is blocked */
      if ( idle != NULL ) {
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      /* Hand the node back to its owner */
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      /* A passive node without a ready user cannot be scheduled */
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
  }

  _Thread_Scheduler_release_critical( user, &lock_context );
  return action;
}
1047
1048/**
1049 * @brief Release an idle thread using this scheduler node.
1050 *
1051 * @param[in] context The scheduler instance context.
1052 * @param[in] node The node which may have an idle thread as user.
1053 * @param[in] release_idle_thread Function to release an idle thread.
1054 *
1055 * @retval idle The idle thread which used this node.
1056 * @retval NULL This node had no idle thread as an user.
1057 */
1058RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
1059  Scheduler_Context             *context,
1060  Scheduler_Node                *node,
1061  Scheduler_Release_idle_thread  release_idle_thread
1062)
1063{
1064  Thread_Control *idle = _Scheduler_Node_get_idle( node );
1065
1066  if ( idle != NULL ) {
1067    Thread_Control *owner = _Scheduler_Node_get_owner( node );
1068
1069    node->idle = NULL;
1070    _Scheduler_Node_set_user( node, owner );
1071    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
1072    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );
1073
1074    ( *release_idle_thread )( context, idle );
1075  }
1076
1077  return idle;
1078}
1079
1080RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
1081  Scheduler_Node *needs_idle,
1082  Scheduler_Node *uses_idle,
1083  Thread_Control *idle
1084)
1085{
1086  uses_idle->idle = NULL;
1087  _Scheduler_Node_set_user(
1088    uses_idle,
1089    _Scheduler_Node_get_owner( uses_idle )
1090  );
1091  _Scheduler_Set_idle_thread( needs_idle, idle );
1092}
1093
1094/**
1095 * @brief Block this scheduler node.
1096 *
1097 * @param[in] context The scheduler instance context.
1098 * @param[in] thread The thread which wants to get blocked referencing this
1099 *   node.  This is not necessarily the user of this node in case the node
1100 *   participates in the scheduler helping protocol.
1101 * @param[in] node The node which wants to get blocked.
1102 * @param[in] is_scheduled This node is scheduled.
1103 * @param[in] get_idle_thread Function to get an idle thread.
1104 *
1105 * @retval thread_cpu The processor of the thread.  Indicates to continue with
1106 *   the blocking operation.
1107 * @retval NULL Otherwise.
1108 */
1109RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
1110  Scheduler_Context         *context,
1111  Thread_Control            *thread,
1112  Scheduler_Node            *node,
1113  bool                       is_scheduled,
1114  Scheduler_Get_idle_thread  get_idle_thread
1115)
1116{
1117  ISR_lock_Context  lock_context;
1118  Thread_Control   *old_user;
1119  Thread_Control   *new_user;
1120  Per_CPU_Control  *thread_cpu;
1121
1122  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1123  thread_cpu = _Thread_Get_CPU( thread );
1124  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
1125  _Thread_Scheduler_release_critical( thread, &lock_context );
1126
1127  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
1128    _Assert( thread == _Scheduler_Node_get_user( node ) );
1129
1130    return thread_cpu;
1131  }
1132
1133  new_user = NULL;
1134
1135  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
1136    if ( is_scheduled ) {
1137      _Assert( thread == _Scheduler_Node_get_user( node ) );
1138      old_user = thread;
1139      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
1140    }
1141  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
1142    if ( is_scheduled ) {
1143      old_user = _Scheduler_Node_get_user( node );
1144
1145      if ( thread == old_user ) {
1146        Thread_Control *owner = _Scheduler_Node_get_owner( node );
1147
1148        if (
1149          thread != owner
1150            && owner->Scheduler.state == THREAD_SCHEDULER_READY
1151        ) {
1152          new_user = owner;
1153          _Scheduler_Node_set_user( node, new_user );
1154        } else {
1155          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
1156        }
1157      }
1158    }
1159  } else {
1160    /* Not implemented, this is part of the OMIP support path. */
1161    _Assert(0);
1162  }
1163
1164  if ( new_user != NULL ) {
1165    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
1166
1167    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
1168    _Thread_Set_CPU( new_user, cpu );
1169    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
1170  }
1171
1172  return NULL;
1173}
1174
1175/**
1176 * @brief Unblock this scheduler node.
1177 *
1178 * @param[in] context The scheduler instance context.
1179 * @param[in] the_thread The thread which wants to get unblocked.
1180 * @param[in] node The node which wants to get unblocked.
1181 * @param[in] is_scheduled This node is scheduled.
1182 * @param[in] release_idle_thread Function to release an idle thread.
1183 *
1184 * @retval true Continue with the unblocking operation.
1185 * @retval false Otherwise.
1186 */
1187RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
1188  Scheduler_Context             *context,
1189  Thread_Control                *the_thread,
1190  Scheduler_Node                *node,
1191  bool                           is_scheduled,
1192  Scheduler_Release_idle_thread  release_idle_thread
1193)
1194{
1195  bool unblock;
1196
1197  if ( is_scheduled ) {
1198    Thread_Control *old_user = _Scheduler_Node_get_user( node );
1199    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
1200    Thread_Control *idle = _Scheduler_Release_idle_thread(
1201      context,
1202      node,
1203      release_idle_thread
1204    );
1205    Thread_Control *owner = _Scheduler_Node_get_owner( node );
1206    Thread_Control *new_user;
1207
1208    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
1209      _Assert( idle != NULL );
1210      new_user = the_thread;
1211    } else if ( idle != NULL ) {
1212      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
1213      new_user = the_thread;
1214    } else if ( the_thread != owner ) {
1215      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
1216      _Assert( old_user != the_thread );
1217      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
1218      new_user = the_thread;
1219      _Scheduler_Node_set_user( node, new_user );
1220    } else {
1221      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
1222      _Assert( old_user != the_thread );
1223      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
1224      new_user = NULL;
1225    }
1226
1227    if ( new_user != NULL ) {
1228      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
1229      _Thread_Set_CPU( new_user, cpu );
1230      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
1231    }
1232
1233    unblock = false;
1234  } else {
1235    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
1236
1237    unblock = true;
1238  }
1239
1240  return unblock;
1241}
1242
1243/**
1244 * @brief Asks a ready scheduler node for help.
1245 *
1246 * @param[in] node The ready node offering help.
1247 * @param[in] needs_help The thread needing help.
1248 *
1249 * @retval needs_help The thread needing help.
1250 */
1251RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
1252  Scheduler_Node *node,
1253  Thread_Control *needs_help
1254)
1255{
1256  _Scheduler_Node_set_user( node, needs_help );
1257
1258  return needs_help;
1259}
1260
1261/**
1262 * @brief Asks a scheduled scheduler node for help.
1263 *
1264 * @param[in] context The scheduler instance context.
1265 * @param[in] node The scheduled node offering help.
1266 * @param[in] offers_help The thread offering help.
1267 * @param[in] needs_help The thread needing help.
1268 * @param[in] previous_accepts_help The previous thread accepting help by this
1269 *   scheduler node.
1270 * @param[in] release_idle_thread Function to release an idle thread.
1271 *
1272 * @retval needs_help The previous thread accepting help by this scheduler node
1273 *   which was displaced by the thread needing help.
1274 * @retval NULL There are no more threads needing help.
1275 */
1276RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
1277  Scheduler_Context             *context,
1278  Scheduler_Node                *node,
1279  Thread_Control                *offers_help,
1280  Thread_Control                *needs_help,
1281  Thread_Control                *previous_accepts_help,
1282  Scheduler_Release_idle_thread  release_idle_thread
1283)
1284{
1285  Thread_Control *next_needs_help = NULL;
1286  Thread_Control *old_user = NULL;
1287  Thread_Control *new_user = NULL;
1288
1289  if (
1290    previous_accepts_help != needs_help
1291      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
1292  ) {
1293    Thread_Control *idle = _Scheduler_Release_idle_thread(
1294      context,
1295      node,
1296      release_idle_thread
1297    );
1298
1299    if ( idle != NULL ) {
1300      old_user = idle;
1301    } else {
1302      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
1303      old_user = previous_accepts_help;
1304    }
1305
1306    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1307      new_user = needs_help;
1308    } else {
1309      _Assert(
1310        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
1311          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
1312      );
1313      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );
1314
1315      new_user = offers_help;
1316    }
1317
1318    if ( previous_accepts_help != offers_help ) {
1319      next_needs_help = previous_accepts_help;
1320    }
1321  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1322    Thread_Control *idle = _Scheduler_Release_idle_thread(
1323      context,
1324      node,
1325      release_idle_thread
1326    );
1327
1328    if ( idle != NULL ) {
1329      old_user = idle;
1330    } else {
1331      old_user = _Scheduler_Node_get_user( node );
1332    }
1333
1334    new_user = needs_help;
1335  } else {
1336    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
1337  }
1338
1339  if ( new_user != old_user ) {
1340    Per_CPU_Control *cpu_self = _Per_CPU_Get();
1341    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
1342
1343    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
1344    _Scheduler_Thread_set_scheduler_and_node(
1345      old_user,
1346      _Thread_Scheduler_get_own_node( old_user ),
1347      old_user
1348    );
1349
1350    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
1351    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );
1352
1353    _Scheduler_Node_set_user( node, new_user );
1354    _Thread_Set_CPU( new_user, cpu );
1355    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
1356  }
1357
1358  return next_needs_help;
1359}
1360
1361/**
1362 * @brief Asks a blocked scheduler node for help.
1363 *
1364 * @param[in] context The scheduler instance context.
1365 * @param[in] node The scheduled node offering help.
1366 * @param[in] offers_help The thread offering help.
1367 * @param[in] needs_help The thread needing help.
1368 *
1369 * @retval true Enqueue this scheduler node.
1370 * @retval false Otherwise.
1371 */
1372RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
1373  Scheduler_Context *context,
1374  Scheduler_Node    *node,
1375  Thread_Control    *offers_help,
1376  Thread_Control    *needs_help
1377)
1378{
1379  bool enqueue;
1380
1381  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
1382
1383  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
1384    _Scheduler_Node_set_user( node, needs_help );
1385    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );
1386
1387    enqueue = true;
1388  } else {
1389    enqueue = false;
1390  }
1391
1392  return enqueue;
1393}
1394#endif
1395
1396RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
1397  Thread_Control *new_heir,
1398  bool            force_dispatch
1399)
1400{
1401  Thread_Control *heir = _Thread_Heir;
1402
1403  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
1404#if defined(RTEMS_SMP)
1405    /*
1406     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
1407     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
1408     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
1409     * schedulers.
1410     */
1411    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
1412    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1413#endif
1414    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
1415    _Thread_Heir = new_heir;
1416    _Thread_Dispatch_necessary = true;
1417  }
1418}
1419
/**
 * @brief Sets a new scheduler and priority for the thread.
 *
 * @param[in] new_scheduler The scheduler instance to assign to the thread.
 * @param[in] the_thread The thread to move to the new scheduler.
 * @param[in] priority The new real priority of the thread.
 *
 * @retval STATUS_SUCCESSFUL The scheduler and priority were set.
 * @retval STATUS_RESOURCE_IN_USE The thread owns resources, waits on a
 *   thread queue, or has additional priority contributions.
 */
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;

  /* A scheduler change is refused while the thread holds resources or waits. */
  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    /*
     * Other priority nodes (e.g. inherited priorities) remain: roll back the
     * extraction and refuse the scheduler change.
     */
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  /* Move the thread's wait node over to the new scheduler node. */
  _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Wait_nodes,
    &new_scheduler_node->Thread.Wait_node
  );

  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      /*
       * A ready thread must be blocked on the old scheduler before the
       * switch and unblocked on the new one afterwards.
       */
      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      the_thread->Scheduler.own_node = new_scheduler_node;
      the_thread->Scheduler.node = new_scheduler_node;
      _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  /* Same scheduler instance: only the priority changes. */
  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
1508
1509/** @} */
1510
1511#ifdef __cplusplus
1512}
1513#endif
1514
1515#endif
1516/* end of include file */
Note: See TracBrowser for help on using the repository browser.