source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 7f742432

5
Last change on this file since 7f742432 was 7f742432, checked in by Sebastian Huber <sebastian.huber@…>, on Oct 31, 2016 at 7:22:02 AM

score: Delete Thread_Scheduler_control::own_node

Update #2556.

  • Property mode set to 100644
File size: 36.3 KB
Line 
1/**
2 * @file
3 *
4 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
5 *
6 * This inline file contains all of the inlined routines associated with
7 * the manipulation of the scheduler.
8 */
9
10/*
11 *  Copyright (C) 2010 Gedare Bloom.
12 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
13 *  Copyright (c) 2014, 2016 embedded brains GmbH
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.org/license/LICENSE.
18 */
19
20#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
21#define _RTEMS_SCORE_SCHEDULERIMPL_H
22
23#include <rtems/score/scheduler.h>
24#include <rtems/score/assert.h>
25#include <rtems/score/cpusetimpl.h>
26#include <rtems/score/priorityimpl.h>
27#include <rtems/score/smpimpl.h>
28#include <rtems/score/status.h>
29#include <rtems/score/threadimpl.h>
30
31#ifdef __cplusplus
32extern "C" {
33#endif
34
35/**
36 * @addtogroup ScoreScheduler
37 */
38/**@{**/
39
40/**
41 *  @brief Initializes the scheduler to the policy chosen by the user.
42 *
43 *  This routine initializes the scheduler to the policy chosen by the user
44 *  through confdefs, or to the priority scheduler with ready chains by
45 *  default.
46 */
47void _Scheduler_Handler_initialization( void );
48
/**
 * @brief Gets the context of the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 *
 * @return The context of the scheduler instance.
 */
RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}
55
/**
 * @brief Gets the scheduler instance which currently controls the thread.
 *
 * @param[in] the_thread The thread of interest.
 *
 * @return On SMP configurations, the scheduler instance of the thread;
 *   otherwise the one and only scheduler instance of the system.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
68
/**
 * @brief Gets the own scheduler instance of the thread
 * (Thread_Scheduler_control::own_control on SMP configurations).
 *
 * @param[in] the_thread The thread of interest.
 *
 * @return The own scheduler instance of the thread; on uni-processor
 *   configurations this is the one and only scheduler instance.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}
81
/**
 * @brief Gets the scheduler instance assigned to the processor with the
 * specified index.
 *
 * @param[in] cpu_index The processor index.
 *
 * @return The assigned scheduler instance.  On SMP configurations this may
 *   be NULL in case no scheduler instance is assigned to this processor,
 *   see the NULL check in _Scheduler_Tick().
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}
94
95RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
96  const Per_CPU_Control *cpu
97)
98{
99  uint32_t cpu_index = _Per_CPU_Get_index( cpu );
100
101  return _Scheduler_Get_by_CPU_index( cpu_index );
102}
103
/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  /* The per-instance lock lives in the scheduler context */
  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  /* Uni-processor: the disabled interrupts of the caller suffice */
  (void) scheduler;
  (void) lock_context;
#endif
}
127
/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  /* The per-instance lock lives in the scheduler context */
  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  /* Uni-processor: nothing was acquired, see _Scheduler_Acquire_critical() */
  (void) scheduler;
  (void) lock_context;
#endif
}
151
152/**
153 * The preferred method to add a new scheduler is to define the jump table
154 * entries and add a case to the _Scheduler_Initialize routine.
155 *
156 * Generic scheduling implementations that rely on the ready queue only can
157 * be found in the _Scheduler_queue_XXX functions.
158 */
159
/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */
166
/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread which state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* The schedule operation runs under the scheduler instance lock */
  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}
187
/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  Thread_Control          *needs_help;

  /*
   * The first node of this chain appears to be the home scheduler node of
   * the thread (the yield operation is handed the home node explicitly) —
   * NOTE(review): confirm against Thread_Scheduler_control documentation.
   */
  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  /* Done unless the thread itself still needs help after the yield */
  if ( needs_help != the_thread ) {
    return;
  }

  node = _Chain_Next( node );

  /* Ask the remaining scheduler instances of the thread for help */
  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    /* Stop at the first scheduler instance which provided help */
    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
257
/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduler operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  /* Block the thread in the scheduler instance of its first node */
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  /* Withdraw the thread from all other scheduler instances it uses */
  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
319
/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  Thread_Control          *needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  /* Unblock the thread in the scheduler instance of its first node */
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  /* Done unless the thread itself still needs help after the unblock */
  if ( needs_help != the_thread ) {
    return;
  }

  node = _Chain_Next( node );

  /* Ask the remaining scheduler instances of the thread for help */
  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    /* Stop at the first scheduler instance which provided help */
    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
391
/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  /* Carry out pending scheduler node requests first */
  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  /* Update the priority in every scheduler instance used by the thread */
  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
446
#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 * @param[in] sticky_level_change The value to add to the sticky level of the
 *   home scheduler node; the result must remain non-negative.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  /* Carry out pending scheduler node requests first */
  _Thread_Scheduler_process_requests( the_thread );

  /* The sticky level is changed only on the first (home) scheduler node */
  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  /* Propagate the priority change to the remaining scheduler instances */
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif
505
506/**
507 * @brief Maps a thread priority from the user domain to the scheduler domain.
508 *
509 * Let M be the maximum scheduler priority.  The mapping must be bijective in
510 * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
511 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
512 * other values the mapping is undefined.
513 *
514 * @param[in] scheduler The scheduler instance.
515 * @param[in] priority The user domain thread priority.
516 *
517 * @return The corresponding thread priority of the scheduler domain is returned.
518 */
519RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
520  const Scheduler_Control *scheduler,
521  Priority_Control         priority
522)
523{
524  return ( *scheduler->Operations.map_priority )( scheduler, priority );
525}
526
527/**
528 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
529 *
530 * @param[in] scheduler The scheduler instance.
531 * @param[in] priority The scheduler domain thread priority.
532 *
533 * @return The corresponding thread priority of the user domain is returned.
534 */
535RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
536  const Scheduler_Control *scheduler,
537  Priority_Control         priority
538)
539{
540  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
541}
542
543/**
544 * @brief Initializes a scheduler node.
545 *
546 * The scheduler node contains arbitrary data on function entry.  The caller
547 * must ensure that _Scheduler_Node_destroy() will be called after a
548 * _Scheduler_Node_initialize() before the memory of the scheduler node is
549 * destroyed.
550 *
551 * @param[in] scheduler The scheduler instance.
552 * @param[in] node The scheduler node to initialize.
553 * @param[in] the_thread The thread of the scheduler node to initialize.
554 * @param[in] priority The thread priority.
555 */
556RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
557  const Scheduler_Control *scheduler,
558  Scheduler_Node          *node,
559  Thread_Control          *the_thread,
560  Priority_Control         priority
561)
562{
563  ( *scheduler->Operations.node_initialize )(
564    scheduler,
565    node,
566    the_thread,
567    priority
568  );
569}
570
571/**
572 * @brief Destroys a scheduler node.
573 *
574 * The caller must ensure that _Scheduler_Node_destroy() will be called only
575 * after a corresponding _Scheduler_Node_initialize().
576 *
577 * @param[in] scheduler The scheduler instance.
578 * @param[in] node The scheduler node to destroy.
579 */
580RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
581  const Scheduler_Control *scheduler,
582  Scheduler_Node          *node
583)
584{
585  ( *scheduler->Operations.node_destroy )( scheduler, node );
586}
587
588/**
589 * @brief Releases a job of a thread with respect to the scheduler.
590 *
591 * @param[in] the_thread The thread.
592 * @param[in] priority_node The priority node of the job.
593 * @param[in] deadline The deadline in watchdog ticks since boot.
594 * @param[in] queue_context The thread queue context to provide the set of
595 *   threads for _Thread_Priority_update().
596 */
597RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
598  Thread_Control       *the_thread,
599  Priority_Node        *priority_node,
600  uint64_t              deadline,
601  Thread_queue_Context *queue_context
602)
603{
604  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
605
606  _Thread_queue_Context_clear_priority_updates( queue_context );
607  ( *scheduler->Operations.release_job )(
608    scheduler,
609    the_thread,
610    priority_node,
611    deadline,
612    queue_context
613  );
614}
615
616/**
617 * @brief Cancels a job of a thread with respect to the scheduler.
618 *
619 * @param[in] the_thread The thread.
620 * @param[in] priority_node The priority node of the job.
621 * @param[in] queue_context The thread queue context to provide the set of
622 *   threads for _Thread_Priority_update().
623 */
624RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
625  Thread_Control       *the_thread,
626  Priority_Node        *priority_node,
627  Thread_queue_Context *queue_context
628)
629{
630  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
631
632  _Thread_queue_Context_clear_priority_updates( queue_context );
633  ( *scheduler->Operations.cancel_job )(
634    scheduler,
635    the_thread,
636    priority_node,
637    queue_context
638  );
639}
640
641/**
642 * @brief Scheduler method invoked at each clock tick.
643 *
644 * This method is invoked at each clock tick to allow the scheduler
645 * implementation to perform any activities required.  For the
646 * scheduler which support standard RTEMS features, this includes
647 * time-slicing management.
648 */
649RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
650{
651  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
652  Thread_Control *executing = cpu->executing;
653
654  if ( scheduler != NULL && executing != NULL ) {
655    ( *scheduler->Operations.tick )( scheduler, executing );
656  }
657}
658
659/**
660 * @brief Starts the idle thread for a particular processor.
661 *
662 * @param[in] scheduler The scheduler instance.
663 * @param[in,out] the_thread The idle thread for the processor.
664 * @param[in,out] cpu The processor for the idle thread.
665 *
666 * @see _Thread_Create_idle().
667 */
668RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
669  const Scheduler_Control *scheduler,
670  Thread_Control          *the_thread,
671  Per_CPU_Control         *cpu
672)
673{
674  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
675}
676
677#if defined(RTEMS_SMP)
/**
 * @brief Gets the scheduler assignment of the processor with the specified
 * index.
 *
 * @param[in] cpu_index The processor index.
 *
 * @return The scheduler assignment of the processor.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}
684
/**
 * @brief Checks if the processor assignment is mandatory.
 *
 * @param[in] assignment The scheduler assignment of a processor.
 *
 * @retval true The SCHEDULER_ASSIGN_PROCESSOR_MANDATORY attribute is set.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}
691
/**
 * @brief Checks if the processor should be started.
 *
 * @param[in] assignment The scheduler assignment of a processor.
 *
 * @retval true A scheduler instance is assigned to this processor.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
698#endif /* defined(RTEMS_SMP) */
699
700RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
701  const Scheduler_Control *scheduler,
702  uint32_t cpu_index
703)
704{
705#if defined(RTEMS_SMP)
706  const Scheduler_Assignment *assignment =
707    _Scheduler_Get_assignment( cpu_index );
708
709  return assignment->scheduler == scheduler;
710#else
711  (void) scheduler;
712  (void) cpu_index;
713
714  return true;
715#endif
716}
717
718#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
719
/**
 * @brief Fills the processor set with the processors owned by the scheduler
 * instance.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpusetsize Size of the processor set buffer in bytes.
 * @param[out] cpuset The processor set buffer.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    /* Uni-processor: the only scheduler instance owns all processors */
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}
743
744RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
745  const Scheduler_Control *scheduler,
746  Thread_Control          *the_thread,
747  size_t                   cpusetsize,
748  cpu_set_t               *cpuset
749)
750{
751  (void) the_thread;
752
753  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );
754
755  return true;
756}
757
/**
 * @brief Gets the processor affinity set of the thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpusetsize Size of the affinity set buffer in bytes.
 * @param[out] cpuset The affinity set buffer.
 *
 * @retval true The operation was successful.
 * @retval false Otherwise.
 */
bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);
763
764RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
765  const Scheduler_Control *scheduler,
766  Thread_Control          *the_thread,
767  size_t                   cpusetsize,
768  const cpu_set_t         *cpuset
769)
770{
771  uint32_t cpu_count = _SMP_Get_processor_count();
772  uint32_t cpu_index;
773  bool     ok = true;
774
775  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
776#if defined(RTEMS_SMP)
777    const Scheduler_Control *scheduler_of_cpu =
778      _Scheduler_Get_by_CPU_index( cpu_index );
779
780    ok = ok
781      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
782        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
783          && scheduler != scheduler_of_cpu ) );
784#else
785    (void) scheduler;
786
787    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
788#endif
789  }
790
791  return ok;
792}
793
/**
 * @brief Sets the processor affinity set of the thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] cpusetsize Size of the affinity set buffer in bytes.
 * @param[in] cpuset The new affinity set.
 *
 * @retval true The operation was successful.
 * @retval false Otherwise.
 */
bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);
799
800#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
801
/**
 * @brief Generic block operation for scheduler implementations.
 *
 * Extracts the thread from the ready queue via the provided extract operation
 * and, in case the thread is the executing or heir thread, selects a new heir
 * via the provided schedule operation.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread to block.
 * @param[in] node The scheduler node of the thread.
 * @param[in] extract Operation to remove the thread from the ready queue.
 * @param[in] schedule Operation to make a new scheduling decision.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
826
827RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
828  const Scheduler_Control *scheduler
829)
830{
831#if defined(RTEMS_SMP)
832  return _Scheduler_Get_context( scheduler )->processor_count;
833#else
834  (void) scheduler;
835
836  return 1;
837#endif
838}
839
/**
 * @brief Builds the object identifier of the scheduler with the specified
 * index.
 *
 * @param[in] scheduler_index The zero-based scheduler index.
 *
 * @return The object identifier of the scheduler.
 */
RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}
849
850RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
851{
852  uint32_t minimum_id = _Scheduler_Build_id( 0 );
853
854  return id - minimum_id;
855}
856
857RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
858  Objects_Id                id,
859  const Scheduler_Control **scheduler_p
860)
861{
862  uint32_t index = _Scheduler_Get_index_by_id( id );
863  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];
864
865  *scheduler_p = scheduler;
866
867  return index < _Scheduler_Count
868    && _Scheduler_Get_processor_count( scheduler ) > 0;
869}
870
871RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
872{
873  const Scheduler_Control *scheduler;
874  bool ok = _Scheduler_Get_by_id( id, &scheduler );
875
876  (void) scheduler;
877
878  return ok;
879}
880
881RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
882  const Scheduler_Control *scheduler
883)
884{
885  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
886}
887
/**
 * @brief Sets a new priority in the home scheduler node of the thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority value.
 * @param[in] prepend_it Indicates whether the thread should be prepended to
 *   its priority group, see _Scheduler_Node_set_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}
899
900#if defined(RTEMS_SMP)
901/**
902 * @brief Gets an idle thread from the scheduler instance.
903 *
904 * @param[in] context The scheduler instance context.
905 *
906 * @retval idle An idle thread for use.  This function must always return an
907 * idle thread.  If none is available, then this is a fatal error.
908 */
909typedef Thread_Control *( *Scheduler_Get_idle_thread )(
910  Scheduler_Context *context
911);
912
913/**
914 * @brief Releases an idle thread to the scheduler instance for reuse.
915 *
916 * @param[in] context The scheduler instance context.
917 * @param[in] idle The idle thread to release
918 */
919typedef void ( *Scheduler_Release_idle_thread )(
920  Scheduler_Context *context,
921  Thread_Control    *idle
922);
923
924extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];
925
/**
 * @brief Changes the thread scheduler state to the new state.
 *
 * The transition must be valid according to
 * _Scheduler_Thread_state_valid_state_changes and the caller must own the
 * thread scheduler lock, unless the thread is blocked or the system is not
 * yet up, see the assertions below.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_state The new thread scheduler state.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}
943
/**
 * @brief Sets the idle thread as the user of the scheduler node.
 *
 * The node must have no idle thread set and must currently be used by its
 * owner, see the assertions below.
 *
 * @param[in] node The scheduler node.
 * @param[in] idle The idle thread to set.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}
957
/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero may
 * use an idle thread in the home scheduler instance in case it executes
 * currently in another scheduler instance or in case it is in a blocking
 * state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The idle thread now used by the node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}
984
/**
 * @brief Actions indicated by _Scheduler_Try_to_schedule_node().
 */
typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;
990
/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE This node can be scheduled.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE This node can be
 *   scheduled in case the idle thread of the victim node is exchanged.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK This node cannot be scheduled.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  user = _Scheduler_Node_get_user( node );

  _Thread_Scheduler_acquire_critical( user, &lock_context );

  if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
    /* The user becomes scheduled in this instance */
    _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    user->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      || node->sticky_level == 0
  ) {
    /* The user is already scheduled elsewhere or the node is not sticky */
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    /* Reuse the idle thread of the victim node via an exchange */
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    /* Keep the sticky node scheduled by means of a fresh idle thread */
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( user ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( user, &lock_context );
  return action;
}
1041
1042/**
1043 * @brief Release an idle thread using this scheduler node.
1044 *
1045 * @param[in] context The scheduler instance context.
1046 * @param[in] node The node which may have an idle thread as user.
1047 * @param[in] release_idle_thread Function to release an idle thread.
1048 *
1049 * @retval idle The idle thread which used this node.
1050 * @retval NULL This node had no idle thread as an user.
1051 */
1052RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
1053  Scheduler_Context             *context,
1054  Scheduler_Node                *node,
1055  Scheduler_Release_idle_thread  release_idle_thread
1056)
1057{
1058  Thread_Control *idle = _Scheduler_Node_get_idle( node );
1059
1060  if ( idle != NULL ) {
1061    Thread_Control *owner = _Scheduler_Node_get_owner( node );
1062
1063    node->idle = NULL;
1064    _Scheduler_Node_set_user( node, owner );
1065    ( *release_idle_thread )( context, idle );
1066  }
1067
1068  return idle;
1069}
1070
1071RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
1072  Scheduler_Node *needs_idle,
1073  Scheduler_Node *uses_idle,
1074  Thread_Control *idle
1075)
1076{
1077  uses_idle->idle = NULL;
1078  _Scheduler_Node_set_user(
1079    uses_idle,
1080    _Scheduler_Node_get_owner( uses_idle )
1081  );
1082  _Scheduler_Set_idle_thread( needs_idle, idle );
1083}
1084
1085/**
1086 * @brief Block this scheduler node.
1087 *
1088 * @param[in] context The scheduler instance context.
1089 * @param[in] thread The thread which wants to get blocked referencing this
1090 *   node.  This is not necessarily the user of this node in case the node
1091 *   participates in the scheduler helping protocol.
1092 * @param[in] node The node which wants to get blocked.
1093 * @param[in] is_scheduled This node is scheduled.
1094 * @param[in] get_idle_thread Function to get an idle thread.
1095 *
1096 * @retval thread_cpu The processor of the thread.  Indicates to continue with
1097 *   the blocking operation.
1098 * @retval NULL Otherwise.
1099 */
1100RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
1101  Scheduler_Context         *context,
1102  Thread_Control            *thread,
1103  Scheduler_Node            *node,
1104  bool                       is_scheduled,
1105  Scheduler_Get_idle_thread  get_idle_thread
1106)
1107{
1108  int               sticky_level;
1109  ISR_lock_Context  lock_context;
1110  Per_CPU_Control  *thread_cpu;
1111
1112  sticky_level = node->sticky_level;
1113  --sticky_level;
1114  node->sticky_level = sticky_level;
1115  _Assert( sticky_level >= 0 );
1116
1117  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1118  thread_cpu = _Thread_Get_CPU( thread );
1119  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
1120  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
1121  _Thread_Scheduler_release_critical( thread, &lock_context );
1122
1123  if ( sticky_level > 0 ) {
1124    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
1125      Thread_Control *idle;
1126
1127      idle = _Scheduler_Use_idle_thread(
1128        context,
1129        node,
1130        thread_cpu,
1131        get_idle_thread
1132      );
1133      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
1134    }
1135
1136    return NULL;
1137  }
1138
1139  _Assert( thread == _Scheduler_Node_get_user( node ) );
1140  return thread_cpu;
1141}
1142
/**
 * @brief Discard the idle thread used by this scheduler node and let the
 *   thread take over the idle thread's processor.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which becomes the heir on the processor
 *   previously occupied by the idle thread.
 * @param[in] node The node which has an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  /* Detach the idle thread from the node and give it back. */
  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  /* The thread inherits the idle thread's processor and becomes its heir. */
  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}
1166
1167/**
1168 * @brief Unblock this scheduler node.
1169 *
1170 * @param[in] context The scheduler instance context.
1171 * @param[in] the_thread The thread which wants to get unblocked.
1172 * @param[in] node The node which wants to get unblocked.
1173 * @param[in] is_scheduled This node is scheduled.
1174 * @param[in] release_idle_thread Function to release an idle thread.
1175 *
1176 * @retval true Continue with the unblocking operation.
1177 * @retval false Otherwise.
1178 */
1179RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
1180  Scheduler_Context             *context,
1181  Thread_Control                *the_thread,
1182  Scheduler_Node                *node,
1183  bool                           is_scheduled,
1184  Scheduler_Release_idle_thread  release_idle_thread
1185)
1186{
1187  bool unblock;
1188
1189  ++node->sticky_level;
1190  _Assert( node->sticky_level > 0 );
1191
1192  if ( is_scheduled ) {
1193    _Scheduler_Discard_idle_thread(
1194      context,
1195      the_thread,
1196      node,
1197      release_idle_thread
1198    );
1199    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
1200    unblock = false;
1201  } else {
1202    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
1203    unblock = true;
1204  }
1205
1206  return unblock;
1207}
1208#endif
1209
1210RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
1211  Thread_Control *new_heir,
1212  bool            force_dispatch
1213)
1214{
1215  Thread_Control *heir = _Thread_Heir;
1216
1217  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
1218#if defined(RTEMS_SMP)
1219    /*
1220     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
1221     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
1222     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
1223     * schedulers.
1224     */
1225    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
1226    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1227#endif
1228    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
1229    _Thread_Heir = new_heir;
1230    _Thread_Dispatch_necessary = true;
1231  }
1232}
1233
/**
 * @brief Sets a new scheduler instance and real priority for the thread.
 *
 * @param[in] new_scheduler The scheduler instance to assign to the thread.
 * @param[in] the_thread The thread to change.
 * @param[in] priority The new real priority of the thread with respect to
 *   the new scheduler instance.
 *
 * @retval STATUS_SUCCESSFUL The scheduler and priority were changed.
 * @retval STATUS_RESOURCE_IN_USE The thread owns resources, waits on a
 *   thread queue, or has additional priority nodes contributed by other
 *   objects, so the scheduler cannot be changed.
 */
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;

  /* A thread holding resources or enqueued on a wait queue must not move. */
  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  /*
   * If other priority nodes remain (e.g. contributed by priority
   * inheritance), the change is refused and the real priority node is
   * reinserted to undo the extraction above.
   */
  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  _Thread_Scheduler_process_requests( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      /* Block the thread while its scheduler nodes are exchanged. */
      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      _Assert( old_scheduler_node->sticky_level == 0 );
      _Assert( new_scheduler_node->sticky_level == 0 );

      /*
       * Make the node of the new scheduler instance the sole member of the
       * thread's wait node and scheduler node chains.
       */
      _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
      _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
      _Chain_Initialize_one(
        &the_thread->Scheduler.Wait_nodes,
        &new_scheduler_node->Thread.Wait_node
      );
      _Chain_Extract_unprotected(
        &old_scheduler_node->Thread.Scheduler_node.Chain
      );
      _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
      _Chain_Initialize_one(
        &the_thread->Scheduler.Scheduler_nodes,
        &new_scheduler_node->Thread.Scheduler_node.Chain
      );

      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

      /* Resume the thread on the new scheduler instance. */
      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
1332
1333/** @} */
1334
1335#ifdef __cplusplus
1336}
1337#endif
1338
1339#endif
1340/* end of include file */
Note: See TracBrowser for help on using the repository browser.