source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 7a4b2645

Last change on this file since 7a4b2645 was 7a4b2645, checked in by Joel Sherrill <joel@…>, on 01/11/17 at 15:43:06

Remove obsolete RTEMS_HAVE_SYS_CPUSET_H

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2016 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}
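
/*
 * Usage sketch (hypothetical, not part of this file): every acquire must be
 * paired with a release on the same lock context, and the caller must have
 * interrupts disabled around the pair.
 *
 *   ISR_lock_Context lock_context;
 *
 *   _Scheduler_Acquire_critical( scheduler, &lock_context );
 *   ... inspect or modify the scheduler instance state ...
 *   _Scheduler_Release_critical( scheduler, &lock_context );
 */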

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  bool                     needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( !needs_help ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations,
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  bool                     needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( !needs_help ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uniprocessor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
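
/*
 * Round-trip sketch (hypothetical, not part of this file): for a valid user
 * domain priority p, unmapping the mapped value must yield p again; a
 * scheduler may use the identity mapping or reserve bits for its own use.
 *
 *   Priority_Control p_user = 5;
 *   Priority_Control p_sched = _Scheduler_Map_priority( scheduler, p_user );
 *
 *   _Assert( _Scheduler_Unmap_priority( scheduler, p_sched ) == p_user );
 */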

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
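
/*
 * Lifecycle sketch (hypothetical, not part of this file): a scheduler node is
 * initialized exactly once before use and destroyed exactly once before its
 * memory is reused or freed.
 *
 *   _Scheduler_Node_initialize( scheduler, node, the_thread, priority );
 *   ... the node may now take part in scheduling operations ...
 *   _Scheduler_Node_destroy( scheduler, node );
 */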

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}
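
/*
 * Usage sketch (hypothetical, not part of this file): the operation collects
 * the threads needing a priority update in the queue context, and the caller
 * applies them afterwards with _Thread_Priority_update().
 *
 *   Thread_queue_Context queue_context;
 *
 *   _Scheduler_Release_job( the_thread, priority_node, deadline, &queue_context );
 *   _Thread_Priority_update( &queue_context );
 */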

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers
 * which support standard RTEMS features, this includes time-slicing
 * management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t                 cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control   *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
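
/*
 * Usage sketch (hypothetical, not part of this file): obtain the processor
 * set owned by a scheduler instance and test membership with the CPU_*_S()
 * macros.
 *
 *   cpu_set_t cpuset;
 *
 *   _Scheduler_Get_processor_set( scheduler, sizeof( cpuset ), &cpuset );
 *
 *   if ( CPU_ISSET_S( 0, sizeof( cpuset ), &cpuset ) ) {
 *     ... processor 0 belongs to this scheduler instance ...
 *   }
 */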

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Per_CPU_Control   *cpu;
    const Scheduler_Control *scheduler_of_cpu;

    cpu = _Per_CPU_Get_by_index( cpu_index );
    scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
          && scheduler != scheduler_of_cpu ) );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
  Objects_Id id
)
{
  uint32_t index;

  index = _Scheduler_Get_index_by_id( id );

  if ( index >= _Scheduler_Count ) {
    return NULL;
  }

  return &_Scheduler_Table[ index ];
}
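
/*
 * Round-trip sketch (hypothetical, not part of this file): scheduler index
 * zero corresponds to the first entry of _Scheduler_Table, and an identifier
 * outside the table yields NULL.
 *
 *   Objects_Id id = _Scheduler_Build_id( 0 );
 *
 *   _Assert( _Scheduler_Get_index_by_id( id ) == 0 );
 *   _Assert( _Scheduler_Get_by_id( id ) == &_Scheduler_Table[ 0 ] );
 */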

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero may
 * use an idle thread in the home scheduler instance in case it currently
 * executes in another scheduler instance or is in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The action which the caller must carry out to schedule, block, or
 *   exchange the idle thread for this node.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}

/**
 * @brief Release an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
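
/*
 * Sticky level sketch (hypothetical, not part of this file): the sticky level
 * counts the reasons to keep the node in the set of scheduled nodes.
 * _Scheduler_Block_node() decrements it and, if it stays positive, a
 * scheduled node is covered by an idle thread instead of being withdrawn;
 * _Scheduler_Unblock_node() increments it and discards a no longer needed
 * idle thread.
 *
 *   sticky_level == 2, thread blocks
 *     -> sticky_level == 1, an idle thread covers the node
 *   thread unblocks
 *     -> sticky_level == 2, the idle thread is discarded
 */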
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;
#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  if ( !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler = _Thread_Scheduler_get_home( the_thread );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  if ( _Scheduler_Get_processor_count( new_scheduler ) == 0 ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  the_thread->Scheduler.home = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
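
/*
 * Usage sketch (hypothetical, not part of this file): this routine backs the
 * rtems_task_set_scheduler() directive; a thread waiting on a thread queue
 * or holding additional priority nodes cannot change its scheduler.
 *
 *   Status_Control status;
 *
 *   status = _Scheduler_Set( new_scheduler, the_thread, priority );
 *
 *   if ( status != STATUS_SUCCESSFUL ) {
 *     ... the scheduler change was refused ...
 *   }
 */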

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */