source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ ec17ad4

Last change on this file since ec17ad4 was ec17ad4, checked in by Sebastian Huber <sebastian.huber@…>, on 11/23/16 at 06:16:41

score: Delete obsolete scheduler debug aid

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2016 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

/**
 * @brief Gets the context of the scheduler instance.
 */
RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

/**
 * @brief Gets the scheduler instance of the processor.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}
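
/*
 * Usage sketch (illustrative, not part of the original source): obtain the
 * scheduler instance responsible for the current processor.  _Per_CPU_Get()
 * is the existing per-CPU accessor; on uni-processor configurations the
 * result is always the first entry of _Scheduler_Table.
 *
 *   const Scheduler_Control *scheduler;
 *
 *   scheduler = _Scheduler_Get_by_CPU( _Per_CPU_Get() );
 */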

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}
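
/*
 * Usage sketch (illustrative, not part of the original source): the typical
 * bracket around a scheduler operation, as used by the inline routines
 * below.  The caller is assumed to have interrupts disabled already.
 *
 *   ISR_lock_Context lock_context;
 *
 *   _Scheduler_Acquire_critical( scheduler, &lock_context );
 *   ( *scheduler->Operations.schedule )( scheduler, the_thread );
 *   _Scheduler_Release_critical( scheduler, &lock_context );
 */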

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  bool                     needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( !needs_help ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  bool                     needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( !needs_help ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 * @param[in] sticky_level_change The sticky level change of the home
 *   scheduler node.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
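
/*
 * Example (illustrative, not part of the original source): due to the
 * bijectivity requirement the map and unmap operations cancel each other
 * out for every valid priority p.
 *
 *   Priority_Control q;
 *
 *   q = _Scheduler_Map_priority( scheduler, p );
 *   _Assert( _Scheduler_Unmap_priority( scheduler, q ) == p );
 */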

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() and before the memory of the scheduler node
 * is reused.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
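
/*
 * Lifecycle sketch (illustrative, not part of the original source): each
 * initialized node must eventually be destroyed before its memory is
 * reused.
 *
 *   _Scheduler_Node_initialize( scheduler, node, the_thread, priority );
 *   ... use the node in scheduler operations ...
 *   _Scheduler_Node_destroy( scheduler, node );
 */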

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}
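
/*
 * Usage sketch (illustrative, not part of the original source): both job
 * operations record the threads with a priority change in the queue
 * context, so the caller is expected to apply _Thread_Priority_update()
 * to this set afterwards.
 *
 *   Thread_queue_Context queue_context;
 *
 *   _Scheduler_Release_job( the_thread, priority_node, deadline, &queue_context );
 *   _Thread_Priority_update( &queue_context );
 */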

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers
 * which support standard RTEMS features, this includes
 * time-slicing management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t                 cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control   *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)

RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Per_CPU_Control   *cpu;
    const Scheduler_Control *scheduler_of_cpu;

    cpu = _Per_CPU_Get_by_index( cpu_index );
    scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
          && scheduler != scheduler_of_cpu ) );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);
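
/*
 * Usage sketch (illustrative, not part of the original source): query the
 * processor affinity of a thread with a stack-allocated processor set.
 *
 *   cpu_set_t cpuset;
 *
 *   if ( _Scheduler_Get_affinity( the_thread, sizeof( cpuset ), &cpuset ) ) {
 *     bool may_run_on_0 = CPU_ISSET_S( 0, sizeof( cpuset ), &cpuset );
 *   }
 */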

#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */

/**
 * @brief Generic scheduler block operation.
 *
 * The thread is removed by the extract operation.  In case the thread is the
 * executing or heir thread, the schedule operation selects a new heir with
 * the force dispatch option enabled.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}
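
/*
 * Example (illustrative, not part of the original source): scheduler
 * identifiers and table indices differ only by a fixed offset, so the two
 * conversions are inverses of each other.
 *
 *   _Assert( _Scheduler_Get_index_by_id( _Scheduler_Build_id( 0 ) ) == 0 );
 */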

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
  Objects_Id id
)
{
  uint32_t index;

  index = _Scheduler_Get_index_by_id( id );

  if ( index >= _Scheduler_Count ) {
    return NULL;
  }

  return &_Scheduler_Table[ index ];
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}
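
/*
 * Usage sketch (illustrative, not part of the original source): this helper
 * only records the new priority in the home scheduler node.  A subsequent
 * _Scheduler_Update_priority() propagates the change to the scheduler,
 * similar to the sequence at the end of _Scheduler_Set() below.
 *
 *   _Scheduler_Thread_set_priority( the_thread, new_priority, false );
 *   _Scheduler_Update_priority( the_thread );
 */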

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Uses an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero may
 * use an idle thread in the home scheduler instance in case it currently
 * executes in another scheduler instance or in case it is in a blocking
 * state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Tries to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE This node can be scheduled.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE This node can be
 *   scheduled in case the idle thread of the victim node is exchanged.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK This node cannot be scheduled
 *   and should be blocked.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}

/**
 * @brief Releases an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Blocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}
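
/*
 * Worked example (illustrative, not part of the original source): suppose a
 * scheduled thread blocks while its node has a sticky level of two.  The
 * level drops to one, so the node stays scheduled: an idle thread is
 * installed on the processor via _Scheduler_Use_idle_thread() and NULL is
 * returned, which stops the blocking operation.  With a sticky level of
 * one the level drops to zero and the processor of the thread is returned,
 * so the blocking operation continues.
 */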

RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

/**
 * @brief Updates the heir thread.
 *
 * The heir thread and the thread dispatch necessary indicator are only
 * updated in case the new heir differs from the current heir and the current
 * heir is preemptible or a dispatch is forced.
 *
 * @param[in] new_heir The new heir thread.
 * @param[in] force_dispatch Indicates if the heir must be updated even if the
 *   current heir is not preemptible.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

/**
 * @brief Sets a new scheduler instance and priority for the thread, if
 * possible.
 *
 * @param[in] new_scheduler The new scheduler instance.
 * @param[in] the_thread The thread.
 * @param[in] priority The thread priority for the new scheduler instance.
 *
 * @retval STATUS_SUCCESSFUL The scheduler instance of the thread was changed.
 * @retval STATUS_RESOURCE_IN_USE The thread waits on a thread queue or uses
 *   additional priority nodes or scheduler nodes, e.g. due to locking
 *   protocols.
 * @retval STATUS_UNSATISFIED The new scheduler instance owns no processor.
 */
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;
#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  if ( !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler = _Thread_Scheduler_get_home( the_thread );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  if ( _Scheduler_Get_processor_count( new_scheduler ) == 0 ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  the_thread->Scheduler.home = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
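
/*
 * Usage sketch (illustrative, not part of the original source): move a
 * thread to the scheduler instance identified by the hypothetical object
 * identifier scheduler_id.
 *
 *   const Scheduler_Control *scheduler;
 *
 *   scheduler = _Scheduler_Get_by_id( scheduler_id );
 *
 *   if ( scheduler != NULL ) {
 *     status = _Scheduler_Set( scheduler, the_thread, priority );
 *   }
 */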

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */
Note: See TracBrowser for help on using the repository browser.