source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 6b1d8c7

Last change on this file was 6b1d8c7, checked in by Sebastian Huber <sebastian.huber@…>, on 07/04/17 at 05:28:44

score: Add processor set to scheduler context

Replace the simple processor count with the processor set owned by the
scheduler instance.

Update #3059.

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2017 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

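/*
 * Usage sketch (illustrative only, not a call site in this file): the
 * acquire/release pair brackets one operation on a scheduler instance.
 * Interrupts are already disabled here.  This is the same pattern the
 * operation wrappers below, e.g. _Scheduler_Schedule(), follow.
 *
 *   ISR_lock_Context lock_context;
 *
 *   _Scheduler_Acquire_critical( scheduler, &lock_context );
 *   ( *scheduler->Operations.schedule )( scheduler, the_thread );
 *   _Scheduler_Release_critical( scheduler, &lock_context );
 */
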
#if defined(RTEMS_SMP)
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );

/**
 * @brief Registers an ask for help request if necessary.
 *
 * The actual ask for help operation is carried out during
 * _Thread_Do_dispatch() on a processor related to the thread.  This yields a
 * better separation of scheduler instances.  A thread of one scheduler
 * instance should not be forced to carry out too much work for threads on
 * other scheduler instances.
 *
 * @param[in] the_thread The thread in need of help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
{
  _Assert( _Thread_State_is_owner( the_thread ) );

  if ( the_thread->Scheduler.helping_nodes > 0 ) {
    _Scheduler_Request_ask_for_help( the_thread );
  }
}
#endif

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

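/*
 * Illustrative call sequence (a sketch modeled on _Thread_Yield(); the
 * locking details shown are an assumption, not defined in this file): the
 * executing thread gives up the processor voluntarily.
 *
 *   ISR_lock_Context lock_context;
 *
 *   _Thread_State_acquire( executing, &lock_context );
 *   _Scheduler_Yield( executing );
 *   _Thread_State_release( executing, &lock_context );
 */
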
/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

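  /*
   * The thread was blocked with respect to its home scheduler instance
   * above.  Withdraw it now from the remaining scheduler instances in
   * which it participates, e.g. due to the scheduler helping protocol.
   */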
  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

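/*
 * Illustrative pairing (taken from _Scheduler_Set() at the end of this
 * file): record the new priority in the scheduler node first, then let the
 * scheduler re-evaluate the scheduling decision.
 *
 *   _Scheduler_Node_set_priority( scheduler_node, priority, false );
 *   _Scheduler_Update_priority( the_thread );
 */
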
#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}

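/*
 * A minimal sketch of the round-trip property stated above (illustrative
 * only): mapping a valid user domain priority p into the scheduler domain
 * and unmapping it again yields p.
 *
 *   Priority_Control p = 5;
 *   Priority_Control q;
 *
 *   q = _Scheduler_Map_priority( scheduler, p );
 *   _Assert( _Scheduler_Unmap_priority( scheduler, q ) == p );
 */
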
/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}

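/*
 * Lifetime sketch (illustrative only): an initialize/destroy pair brackets
 * the useful lifetime of a scheduler node, as required by the comments
 * above.
 *
 *   _Scheduler_Node_initialize( scheduler, node, the_thread, priority );
 *   ... use the node in scheduler operations ...
 *   _Scheduler_Node_destroy( scheduler, node );
 */
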
/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

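/*
 * Caller sketch (hedged; a simplified version of what a periodic job
 * manager might do, without the locking a real caller needs): release a
 * job with a deadline, then carry out the collected priority updates.
 *
 *   Thread_queue_Context queue_context;
 *
 *   _Scheduler_Release_job(
 *     the_thread,
 *     priority_node,
 *     deadline,
 *     &queue_context
 *   );
 *   _Thread_Priority_update( &queue_context );
 */
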
/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers
 * which support standard RTEMS features, this includes time-slicing
 * management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

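/*
 * Illustrative call site (an assumption; the actual invocation lives in
 * the clock tick processing of the score): each processor drives the tick
 * for its own scheduler instance.
 *
 *   _Scheduler_Tick( _Per_CPU_Get() );
 */
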
/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t                 cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control   *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return &_Scheduler_Get_context( scheduler )->Processors;
#else
  return &_Processor_mask_The_one_and_only;
#endif
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Per_CPU_Control   *cpu;
    const Scheduler_Control *scheduler_of_cpu;

    cpu = _Per_CPU_Get_by_index( cpu_index );
    scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
          && scheduler != scheduler_of_cpu ) );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

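/*
 * Equivalently (SMP case, an observation about the predicate above): the
 * set is accepted if and only if it contains every processor owned by this
 * scheduler instance; processors belonging to other scheduler instances
 * may or may not be set.
 */
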
bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Processor_mask_Count( &context->Processors );
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
  Objects_Id id
)
{
  uint32_t index;

  index = _Scheduler_Get_index_by_id( id );

  if ( index >= _Scheduler_Count ) {
    return NULL;
  }

  return &_Scheduler_Table[ index ];
}

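/*
 * Round-trip sketch (illustrative only): a valid scheduler index maps to
 * an object identifier and back.
 *
 *   uint32_t   index = 0;
 *   Objects_Id id;
 *
 *   id = _Scheduler_Build_id( index );
 *   _Assert( _Scheduler_Get_index_by_id( id ) == index );
 *   _Assert( _Scheduler_Get_by_id( id ) == &_Scheduler_Table[ index ] );
 */
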
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) ( scheduler - &_Scheduler_Table[ 0 ] );
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero may
 * use an idle thread in the home scheduler instance in case it currently
 * executes in another scheduler instance or in case it is in a blocking
 * state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The action the caller should carry out: schedule the node, do an
 *   idle thread exchange, or block the node.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}

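/*
 * Caller sketch (illustrative; victim_node and the elided parts are
 * hypothetical): the returned action directs an SMP scheduler
 * implementation.
 *
 *   action = _Scheduler_Try_to_schedule_node(
 *     context,
 *     node,
 *     idle,
 *     get_idle_thread
 *   );
 *
 *   switch ( action ) {
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE:
 *       ... insert the node into the set of scheduled nodes ...
 *       break;
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE:
 *       _Scheduler_Exchange_idle_thread( node, victim_node, idle );
 *       break;
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK:
 *       ... keep the node out of the set of scheduled nodes ...
 *       break;
 *   }
 */
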
/**
 * @brief Release an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

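/*
 * Usage note (an assumption about the callers, which live in the
 * individual scheduler implementations): force_dispatch is true when the
 * previous heir can no longer execute, e.g. it just blocked, so it must be
 * replaced even if it is not preemptible.
 */
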
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;
#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  if ( !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler = _Thread_Scheduler_get_home( the_thread );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  if ( _Scheduler_Get_processor_count( new_scheduler ) == 0 ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  the_thread->Scheduler.home = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */