source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ a7a8ec03

Last change on this file was a7a8ec03, checked in by Sebastian Huber <sebastian.huber@…>, on 10/12/16 at 07:55:34

score: Protect thread scheduler state changes

Update #2556.

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2016 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
  uint32_t cpu_index = _Per_CPU_Get_index( cpu );

  return _Scheduler_Get_by_CPU_index( cpu_index );
}

ISR_LOCK_DECLARE( extern, _Scheduler_Lock )

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  (void) scheduler;
  _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
  (void) scheduler;
  _ISR_lock_Release( &_Scheduler_Lock, lock_context );
}

RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.node;
#else
  return the_thread->Scheduler.nodes;
#endif
}

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

#if defined(RTEMS_SMP)
typedef struct {
  Thread_Control *needs_help;
  Thread_Control *next_needs_help;
} Scheduler_Ask_for_help_context;

RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
  Resource_Node *resource_node,
  void          *arg
)
{
  bool done;
  Scheduler_Ask_for_help_context *help_context = arg;
  Thread_Control *previous_needs_help = help_context->needs_help;
  Thread_Control *next_needs_help;
  Thread_Control *offers_help =
    THREAD_RESOURCE_NODE_TO_THREAD( resource_node );
  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );

  next_needs_help = ( *scheduler->Operations.ask_for_help_X )(
    scheduler,
    offers_help,
    previous_needs_help
  );

  done = next_needs_help != previous_needs_help;

  if ( done ) {
    help_context->next_needs_help = next_needs_help;
  }

  return done;
}

/**
 * @brief Asks threads depending on resources owned by the thread for help.
 *
 * A thread is in need of help if it lost its assigned processor due to
 * pre-emption by a higher priority thread or it was not possible to assign it
 * a processor since its priority is too low on its current scheduler instance.
 *
 * The run-time of this function depends on the size of the resource tree of
 * the thread needing help and on other resource trees in case threads in need
 * of help are produced during this operation.
 *
 * @param[in] needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_X(
  Thread_Control *needs_help
)
{
  do {
    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );

    needs_help = ( *scheduler->Operations.ask_for_help_X )(
      scheduler,
      needs_help,
      needs_help
    );

    if ( needs_help != NULL ) {
      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };

      _Resource_Iterate(
        &needs_help->Resource_node,
        _Scheduler_Ask_for_help_visitor,
        &help_context
      );

      needs_help = help_context.next_needs_help;
    }
  } while ( needs_help != NULL );
}

RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  Thread_Control *needs_help
)
{
  if (
    needs_help != NULL
      && _Resource_Node_owns_resources( &needs_help->Resource_node )
  ) {
    Scheduler_Node *node = _Thread_Scheduler_get_own_node( needs_help );

    if (
      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
        || _Scheduler_Node_get_user( node ) != needs_help
    ) {
      _Scheduler_Ask_for_help_X( needs_help );
    }
  }
}
#endif

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
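
/*
 * Usage sketch (illustrative only, not part of this header): a directive
 * level yield boils down to a dispatch-protected call of _Scheduler_Yield()
 * for the executing thread.  The surrounding code is simplified here and the
 * function name is a placeholder.
 *
 *   static void _Example_Yield_executing( void )
 *   {
 *     Per_CPU_Control *cpu_self = _Thread_Dispatch_disable();
 *
 *     _Scheduler_Yield( _Per_CPU_Get_executing( cpu_self ) );
 *     _Thread_Dispatch_enable( cpu_self );
 *   }
 */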

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( scheduler, &lock_context );
}
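
/*
 * Pairing sketch (illustrative only): thread state change code is expected
 * to call _Scheduler_Block() when a thread leaves the ready set and
 * _Scheduler_Unblock() once it becomes ready again.  The helper below is
 * hypothetical and only shows the expected call order.
 *
 *   static void _Example_Suspend_and_resume( Thread_Control *the_thread )
 *   {
 *     _Scheduler_Block( the_thread );
 *
 *     ... the thread waits, e.g. on a thread queue or timeout ...
 *
 *     _Scheduler_Unblock( the_thread );
 *   }
 */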

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
  const Scheduler_Control *own_scheduler;
  ISR_lock_Context         lock_context;
#if defined(RTEMS_SMP)
  Thread_Control          *needs_help;
#endif

  own_scheduler = _Scheduler_Get_own( the_thread );
  _Scheduler_Acquire_critical( own_scheduler, &lock_context );

#if defined(RTEMS_SMP)
  needs_help =
#endif
  ( *own_scheduler->Operations.update_priority )(
    own_scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif

  _Scheduler_Release_critical( own_scheduler, &lock_context );
}

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
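
/*
 * The bijectivity requirement stated above can be expressed directly in code
 * (illustrative only): mapping a priority p in [0, M] into the scheduler
 * domain and back must yield p again.
 *
 *   static void _Example_Check_priority_mapping(
 *     const Scheduler_Control *scheduler,
 *     Priority_Control         p
 *   )
 *   {
 *     Priority_Control q = _Scheduler_Map_priority( scheduler, p );
 *
 *     _Assert( _Scheduler_Unmap_priority( scheduler, q ) == p );
 *   }
 */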

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() is called after a
 * _Scheduler_Node_initialize() and before the memory of the scheduler node is
 * reused or freed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() is called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
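
/*
 * Lifecycle sketch (illustrative only): thread creation and deletion code is
 * expected to pair these two operations for each scheduler node of a thread.
 * The helper below is hypothetical.
 *
 *   static void _Example_Node_lifecycle(
 *     const Scheduler_Control *scheduler,
 *     Scheduler_Node          *node,
 *     Thread_Control          *the_thread,
 *     Priority_Control         priority
 *   )
 *   {
 *     _Scheduler_Node_initialize( scheduler, node, the_thread, priority );
 *
 *     ... use the node in scheduler operations ...
 *
 *     _Scheduler_Node_destroy( scheduler, node );
 *   }
 */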

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}
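
/*
 * Deadline sketch (illustrative only): a periodic job would announce a new
 * deadline with _Scheduler_Release_job() and withdraw it with
 * _Scheduler_Cancel_job(), applying the collected priority updates
 * afterwards.  The helper below is hypothetical.
 *
 *   static void _Example_Announce_deadline(
 *     Thread_Control *the_thread,
 *     Priority_Node  *priority_node,
 *     uint64_t        deadline
 *   )
 *   {
 *     Thread_queue_Context queue_context;
 *
 *     _Scheduler_Release_job(
 *       the_thread,
 *       priority_node,
 *       deadline,
 *       &queue_context
 *     );
 *     _Thread_Priority_update( &queue_context );
 *   }
 */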

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers which
 * support standard RTEMS features, this includes time-slicing management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}
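
/*
 * Caller sketch (assumption about the clock tick service, illustrative
 * only): the clock tick is expected to invoke this method for each processor
 * it accounts for, along the lines of the hypothetical loop below.
 *
 *   static void _Example_Clock_tick( void )
 *   {
 *     uint32_t cpu_count = _SMP_Get_processor_count();
 *     uint32_t cpu_index;
 *
 *     for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
 *       _Scheduler_Tick( _Per_CPU_Get_by_index( cpu_index ) );
 *     }
 *   }
 */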

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)

RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
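
/*
 * Usage sketch (illustrative only): a caller provides a CPU set of
 * sufficient size and receives the processors owned by the scheduler
 * instance.  A fixed-size cpu_set_t is used here for simplicity.
 *
 *   static void _Example_Query_processor_set(
 *     const Scheduler_Control *scheduler
 *   )
 *   {
 *     cpu_set_t cpuset;
 *
 *     _Scheduler_Get_processor_set( scheduler, sizeof( cpuset ), &cpuset );
 *
 *     ... inspect the set with CPU_ISSET_S() ...
 *   }
 */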

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Scheduler_Control *scheduler_of_cpu =
      _Scheduler_Get_by_CPU_index( cpu_index );

    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
          && scheduler != scheduler_of_cpu ) );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
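
/*
 * Delegation sketch (illustrative only): a concrete scheduler implementation
 * can realize its block operation in terms of _Scheduler_Generic_block() by
 * passing its own extract and schedule bodies.  The _Example_* names are
 * placeholders for the implementation-specific bodies.
 *
 *   void _Example_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *the_thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     _Scheduler_Generic_block(
 *       scheduler,
 *       the_thread,
 *       node,
 *       _Example_Extract_body,
 *       _Example_Schedule_body
 *     );
 *   }
 */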

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}
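
/*
 * Round-trip note: _Scheduler_Build_id() offsets the scheduler index by one
 * and _Scheduler_Get_index_by_id() subtracts the identifier of index zero
 * again, so for a valid index the two functions invert each other, e.g.
 *
 *   _Assert( _Scheduler_Get_index_by_id( _Scheduler_Build_id( 1 ) ) == 1 );
 */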

RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
{
  const Scheduler_Control *scheduler;
  bool ok = _Scheduler_Get_by_id( id, &scheduler );

  (void) scheduler;

  return ok;
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  the_thread->Scheduler.node = node;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}

extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}
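
/*
 * Locking sketch (illustrative only): except for threads in the blocked
 * scheduler state, the caller is expected to own the thread scheduler lock,
 * e.g.
 *
 *   ISR_lock_Context lock_context;
 *
 *   _Thread_Scheduler_acquire_critical( the_thread, &lock_context );
 *   _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
 *   _Thread_Scheduler_release_critical( the_thread, &lock_context );
 */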

/**
 * @brief Changes the scheduler help state of a thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_help_state The new help state.
 *
 * @return The previous help state.
 */
RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
  Thread_Control       *the_thread,
  Scheduler_Help_state  new_help_state
)
{
  Scheduler_Node *node = _Thread_Scheduler_get_own_node( the_thread );
  Scheduler_Help_state previous_help_state = node->help_state;

  node->help_state = new_help_state;

  return previous_help_state;
}

/**
 * @brief Changes the resource tree root of a thread.
 *
 * For each node of the resource sub-tree specified by the top thread the
 * scheduler asks for help.  So the root thread gains access to all scheduler
 * nodes corresponding to the resource sub-tree.  In case a thread previously
 * granted help is displaced by this operation, then the scheduler asks for
 * help using its remaining resource tree.
 *
 * The run-time of this function depends on the size of the resource sub-tree
 * and other resource trees in case threads in need of help are produced
 * during this operation.
 *
 * @param[in] top The thread specifying the resource sub-tree top.
 * @param[in] root The thread specifying the new resource sub-tree root.
 */
void _Scheduler_Thread_change_resource_root(
  Thread_Control *top,
  Thread_Control *root
);

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Uses an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
 * helping state may use an idle thread for the scheduler node owned by itself
 * in case it currently executes using another scheduler node or is in a
 * blocked state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );

  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Tries to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The action which shall be carried out by the caller, e.g. schedule
 *   this node, do an idle thread exchange, or do a blocking operation.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;
  Thread_Control                   *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  user = _Scheduler_Node_get_user( node );

  _Thread_Scheduler_acquire_critical( user, &lock_context );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Scheduler_release_critical( user, &lock_context );
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      if ( idle != NULL ) {
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
  }

  _Thread_Scheduler_release_critical( user, &lock_context );
  return action;
}

/**
 * @brief Releases an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Blocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval true Continue with the blocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context  lock_context;
  Thread_Control   *old_user;
  Thread_Control   *new_user;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    _Assert( thread == _Scheduler_Node_get_user( node ) );

    return true;
  }

  new_user = NULL;

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( is_scheduled ) {
      _Assert( thread == _Scheduler_Node_get_user( node ) );
      old_user = thread;
      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( is_scheduled ) {
      old_user = _Scheduler_Node_get_user( node );

      if ( thread == old_user ) {
        Thread_Control *owner = _Scheduler_Node_get_owner( node );

        if (
          thread != owner
            && owner->Scheduler.state == THREAD_SCHEDULER_READY
        ) {
          new_user = owner;
          _Scheduler_Node_set_user( node, new_user );
        } else {
          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
        }
      }
    }
  } else {
    /* Not implemented, this is part of the OMIP support path. */
    _Assert(0);
  }

  if ( new_user != NULL ) {
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
  }

  return false;
}

/**
 * @brief Unblocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  if ( is_scheduled ) {
    Thread_Control *old_user = _Scheduler_Node_get_user( node );
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );
    Thread_Control *owner = _Scheduler_Node_get_owner( node );
    Thread_Control *new_user;

    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
      _Assert( idle != NULL );
      new_user = the_thread;
    } else if ( idle != NULL ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      new_user = the_thread;
    } else if ( the_thread != owner ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
      new_user = the_thread;
      _Scheduler_Node_set_user( node, new_user );
    } else {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
      new_user = NULL;
    }

    if ( new_user != NULL ) {
      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Set_CPU( new_user, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
    }

    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );

    unblock = true;
  }

  return unblock;
}

/**
 * @brief Asks a ready scheduler node for help.
 *
 * @param[in] node The ready node offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
  Scheduler_Node *node,
  Thread_Control *needs_help
)
{
  _Scheduler_Node_set_user( node, needs_help );

  return needs_help;
}

/**
 * @brief Asks a scheduled scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 * @param[in] previous_accepts_help The previous thread accepting help by this
 *   scheduler node.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval needs_help The previous thread accepting help by this scheduler node
 *   which was displaced by the thread needing help.
 * @retval NULL There are no more threads needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Thread_Control                *offers_help,
  Thread_Control                *needs_help,
  Thread_Control                *previous_accepts_help,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *next_needs_help = NULL;
  Thread_Control *old_user = NULL;
  Thread_Control *new_user = NULL;

  if (
    previous_accepts_help != needs_help
      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
  ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
      old_user = previous_accepts_help;
    }

    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
      new_user = needs_help;
    } else {
      _Assert(
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );

      new_user = offers_help;
    }

    if ( previous_accepts_help != offers_help ) {
      next_needs_help = previous_accepts_help;
    }
  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      old_user = _Scheduler_Node_get_user( node );
    }

    new_user = needs_help;
  } else {
    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
  }

  if ( new_user != old_user ) {
    Per_CPU_Control *cpu_self = _Per_CPU_Get();
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_scheduler_and_node(
      old_user,
      _Thread_Scheduler_get_own_node( old_user ),
      old_user
    );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );

    _Scheduler_Node_set_user( node, new_user );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
  }

  return next_needs_help;
}

/**
 * @brief Asks a blocked scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval true Enqueue this scheduler node.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *offers_help,
  Thread_Control    *needs_help
)
{
  bool enqueue;

  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Scheduler_Node_set_user( node, needs_help );
    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );

    enqueue = true;
  } else {
    enqueue = false;
  }

  return enqueue;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}
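
/*
 * Caller sketch (illustrative only): a uniprocessor schedule body typically
 * picks the highest priority ready thread and installs it as heir.  The
 * _Example_* names are placeholders.
 *
 *   static void _Example_Schedule_body(
 *     Thread_Control *the_thread,
 *     bool            force_dispatch
 *   )
 *   {
 *     Thread_Control *highest_ready = _Example_Get_highest_ready();
 *
 *     (void) the_thread;
 *     _Scheduler_Update_heir( highest_ready, force_dispatch );
 *   }
 */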

RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;

  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Wait_nodes,
    &new_scheduler_node->Thread.Wait_node
  );

  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      the_thread->Scheduler.own_node = new_scheduler_node;
      the_thread->Scheduler.node = new_scheduler_node;
      _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
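
/*
 * Caller sketch (illustrative only): a directive level scheduler change
 * would resolve the scheduler identifier first and then delegate to
 * _Scheduler_Set(), with thread lifecycle protection omitted here for
 * brevity.  The helper name is a placeholder.
 *
 *   static Status_Control _Example_Set_scheduler(
 *     Thread_Control   *the_thread,
 *     Objects_Id        scheduler_id,
 *     Priority_Control  priority
 *   )
 *   {
 *     const Scheduler_Control *scheduler;
 *
 *     if ( !_Scheduler_Get_by_id( scheduler_id, &scheduler ) ) {
 *       return STATUS_INVALID_ID;
 *     }
 *
 *     return _Scheduler_Set( scheduler, the_thread, priority );
 *   }
 */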

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */