source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 63e2ca1b

Last change on this file since 63e2ca1b was 63e2ca1b, checked in by Sebastian Huber <sebastian.huber@…>, on 10/31/16 at 08:13:35

score: Simplify yield and unblock scheduler ops

Update #2556.

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2016 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
  uint32_t cpu_index = _Per_CPU_Get_index( cpu );

  return _Scheduler_Get_by_CPU_index( cpu_index );
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}
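
/*
 * A minimal usage sketch: the two functions above bracket a call of a
 * scheduler operation, as done by _Scheduler_Schedule() and friends below.
 * The caller must already run with interrupts disabled.
 *
 *   ISR_lock_Context lock_context;
 *
 *   _Scheduler_Acquire_critical( scheduler, &lock_context );
 *   ( *scheduler->Operations.schedule )( scheduler, the_thread );
 *   _Scheduler_Release_critical( scheduler, &lock_context );
 */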

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler.  It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  bool                     needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( !needs_help ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
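
/*
 * Note on the SMP variant above: the yield operation is carried out on the
 * home scheduler instance first.  If that instance signals needs_help, the
 * remaining scheduler nodes of the thread are asked for help in turn until
 * one ask_for_help operation succeeds.
 */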

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler.  The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  bool                     needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( !needs_help ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uniprocessor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
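
/*
 * A minimal sketch of the bijection contract (hypothetical priority value;
 * depending on the scheduler the mapped value may differ from the user
 * value):
 *
 *   Priority_Control user_priority = 5;
 *   Priority_Control scheduler_priority =
 *     _Scheduler_Map_priority( scheduler, user_priority );
 *
 *   _Assert(
 *     _Scheduler_Unmap_priority( scheduler, scheduler_priority )
 *       == user_priority
 *   );
 */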

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() is called after this
 * _Scheduler_Node_initialize() and before the memory of the scheduler node
 * is reused or freed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() is called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
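
/*
 * A minimal lifetime sketch (storage handling is an assumption; the
 * scheduler nodes are normally embedded in the thread control block):
 *
 *   _Scheduler_Node_initialize( scheduler, node, the_thread, priority );
 *   ... use the node in scheduler operations ...
 *   _Scheduler_Node_destroy( scheduler, node );
 */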

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers which
 * support standard RTEMS features, this includes time-slicing management.
 *
 * @param[in] cpu The processor of this clock tick.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)

RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
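
/*
 * A minimal usage sketch (assuming a fixed-size cpu_set_t is sufficient
 * for the processor count):
 *
 *   cpu_set_t cpuset;
 *
 *   _Scheduler_Get_processor_set( scheduler, sizeof( cpuset ), &cpuset );
 *
 *   if ( CPU_ISSET_S( 0, sizeof( cpuset ), &cpuset ) ) {
 *     ... processor 0 is owned by this scheduler instance ...
 *   }
 */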

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Scheduler_Control *scheduler_of_cpu =
      _Scheduler_Get_by_CPU_index( cpu_index );

    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
          && scheduler != scheduler_of_cpu ) );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
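
/*
 * A sketch of how a scheduler implementation may delegate its block
 * operation to _Scheduler_Generic_block() (the _My_scheduler_* callback
 * names are illustrative assumptions, not part of this header):
 *
 *   void _My_scheduler_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *the_thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     _Scheduler_Generic_block(
 *       scheduler,
 *       the_thread,
 *       node,
 *       _My_scheduler_Extract,
 *       _My_scheduler_Schedule_body
 *     );
 *   }
 */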

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}
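
/*
 * Worked example: _Scheduler_Build_id( i ) packs the object index i + 1
 * into an object identifier, and _Scheduler_Get_index_by_id() inverts this
 * by subtracting _Scheduler_Build_id( 0 ), so
 *
 *   _Scheduler_Get_index_by_id( _Scheduler_Build_id( i ) ) == i
 *
 * holds for all valid scheduler indices i.
 */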

RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
{
  const Scheduler_Control *scheduler;
  bool ok = _Scheduler_Get_by_id( id, &scheduler );

  (void) scheduler;

  return ok;
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}
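
/*
 * Example transitions exercised elsewhere in this file: READY to SCHEDULED
 * in _Scheduler_Try_to_schedule_node(), SCHEDULED or READY to BLOCKED in
 * _Scheduler_Block_node(), and BLOCKED to READY or SCHEDULED in
 * _Scheduler_Unblock_node().
 */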

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero
 * may use an idle thread in the home scheduler instance in case it
 * currently executes in another scheduler instance or is in a blocking
 * state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The idle thread now used by the node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE This node can be scheduled.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE This node can be
 *   scheduled, however, an idle thread exchange with the victim node is
 *   necessary.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK This node cannot be scheduled
 *   and the corresponding blocking should be carried out.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  user = _Scheduler_Node_get_user( node );

  _Thread_Scheduler_acquire_critical( user, &lock_context );

  if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    user->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      || node->sticky_level == 0
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( user ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( user, &lock_context );
  return action;
}
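
/*
 * A sketch of how an SMP scheduler implementation may dispatch on the
 * returned action (the surrounding handler code is an illustrative
 * assumption):
 *
 *   switch (
 *     _Scheduler_Try_to_schedule_node( context, node, idle, get_idle_thread )
 *   ) {
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE:
 *       ... insert the node into the set of scheduled nodes ...
 *       break;
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE:
 *       _Scheduler_Exchange_idle_thread( node, victim_node, idle );
 *       break;
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK:
 *       ... withdraw the node ...
 *       break;
 *   }
 */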

/**
 * @brief Release an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}
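
/*
 * Worked example derived from the logic above: a thread with a sticky
 * level of one blocks; the level drops to zero, so the processor of the
 * thread is returned and the blocking operation continues.  A thread with
 * a sticky level of two (for example the owner of a sticky resource)
 * blocks; the level drops to one, so an idle thread takes over the
 * processor and NULL is returned to stop the blocking operation.
 */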

RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}
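
/*
 * For example, _Scheduler_Generic_block() above calls its schedule callback
 * with a true argument; a scheduler implementation typically forwards this
 * argument as force_dispatch, so that a thread which blocked itself is
 * replaced even if it is not preemptible.
 */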

RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;

  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  _Thread_Scheduler_process_requests( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Thread_Scheduler_get_home( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      _Assert( old_scheduler_node->sticky_level == 0 );
      _Assert( new_scheduler_node->sticky_level == 0 );

      _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
      _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
      _Chain_Initialize_one(
        &the_thread->Scheduler.Wait_nodes,
        &new_scheduler_node->Thread.Wait_node
      );
      _Chain_Extract_unprotected(
        &old_scheduler_node->Thread.Scheduler_node.Chain
      );
      _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
      _Chain_Initialize_one(
        &the_thread->Scheduler.Scheduler_nodes,
        &new_scheduler_node->Thread.Scheduler_node.Chain
      );

      the_thread->Scheduler.home = new_scheduler;
      _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
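
/*
 * A hypothetical call path sketch (the directive name is an assumption
 * based on the Classic API): rtems_task_set_scheduler() validates the
 * scheduler identifier and priority and then calls _Scheduler_Set().  The
 * STATUS_RESOURCE_IN_USE returns above cover threads which own resources
 * or wait on a thread queue; such threads must not change their scheduler.
 */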

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */