source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 6771359f

Last change on this file: 6771359f, checked in by Sebastian Huber <sebastian.huber@…> on 10/27/16 at 04:42:06

score: Second part of new MrsP implementation

Update #2556.

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2016 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
  uint32_t cpu_index = _Per_CPU_Get_index( cpu );

  return _Scheduler_Get_by_CPU_index( cpu_index );
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}
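
/*
 * Usage sketch (illustrative, not part of the original file): callers pair
 * the acquire and release operations around a scheduler operation while
 * interrupts are already disabled, exactly as _Scheduler_Schedule() below
 * does.
 *
 *   ISR_lock_Context lock_context;
 *
 *   _Scheduler_Acquire_critical( scheduler, &lock_context );
 *   ( *scheduler->Operations.schedule )( scheduler, the_thread );
 *   _Scheduler_Release_critical( scheduler, &lock_context );
 */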

RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.node;
#else
  return the_thread->Scheduler.nodes;
#endif
}

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  Thread_Control          *needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( needs_help != the_thread ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler.  The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations,
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  Thread_Control          *needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( needs_help != the_thread ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
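
/*
 * Illustration of the bijectivity requirement stated above (hypothetical
 * priority value, not part of the original file):
 *
 *   Priority_Control p = 5;
 *   Priority_Control q = _Scheduler_Map_priority( scheduler, p );
 *
 *   _Assert( _Scheduler_Unmap_priority( scheduler, q ) == p );
 *
 * Fixed-priority schedulers typically use the identity mapping, while a
 * deadline-based scheduler may, for example, reserve a bit of the priority
 * value to distinguish deadline-driven from background threads.
 */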

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
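
/*
 * Lifecycle sketch (hypothetical call sequence, not part of the original
 * file): initialize and destroy must pair up before the node memory is
 * reused, as required by the comments above.  Typically this follows the
 * thread lifecycle.
 *
 *   _Scheduler_Node_initialize( scheduler, node, the_thread, priority );
 *   // ... node participates in scheduling decisions ...
 *   _Scheduler_Node_destroy( scheduler, node );
 */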

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}
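
/*
 * Usage sketch (hypothetical caller, e.g. a rate-monotonic period object;
 * the owner and priority node names are made up): after the job operation
 * the collected priority updates must be carried out via
 * _Thread_Priority_update().
 *
 *   Thread_queue_Context queue_context;
 *
 *   _Scheduler_Release_job( owner, &period_priority_node, deadline, &queue_context );
 *   _Thread_Priority_update( &queue_context );
 */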

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers
 * which support standard RTEMS features, this includes time-slicing
 * management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}
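
/*
 * Assumed call environment (not part of the original file): the clock tick
 * service is expected to invoke this once per processor, roughly like
 *
 *   _Scheduler_Tick( _Per_CPU_Get() );
 */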

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)

RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Scheduler_Control *scheduler_of_cpu =
      _Scheduler_Get_by_CPU_index( cpu_index );

    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
          && scheduler != scheduler_of_cpu ) );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
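
/*
 * Sketch of how a concrete scheduler might delegate its block operation to
 * the generic helper above (the scheduler and callback names here are
 * hypothetical):
 *
 *   void _My_scheduler_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *the_thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     _Scheduler_Generic_block(
 *       scheduler,
 *       the_thread,
 *       node,
 *       _My_scheduler_Extract,       // remove the node from the ready queue
 *       _My_scheduler_Schedule_body  // select a new heir if necessary
 *     );
 *   }
 */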

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}
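
/*
 * The identifier mapping round-trips by construction (a sketch with a
 * hypothetical index):
 *
 *   Objects_Id id = _Scheduler_Build_id( 2 );
 *
 *   _Assert( _Scheduler_Get_index_by_id( id ) == 2 );
 *
 * An invalid identifier yields an out-of-range index, which
 * _Scheduler_Get_by_id() below rejects via its index check.
 */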

RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
{
  const Scheduler_Control *scheduler;
  bool ok = _Scheduler_Get_by_id( id, &scheduler );

  (void) scheduler;

  return ok;
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero
 * may use an idle thread in the home scheduler instance in case it
 * currently executes in another scheduler instance or is in a blocking
 * state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The action which the caller must carry out for this node:
 *   schedule it, perform an idle thread exchange, or block it.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  user = _Scheduler_Node_get_user( node );

  _Thread_Scheduler_acquire_critical( user, &lock_context );

  if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    user->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      || node->sticky_level == 0
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( user ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( user, &lock_context );
  return action;
}

/**
 * @brief Release an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;

  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  _Thread_Scheduler_process_requests( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      _Assert( old_scheduler_node->sticky_level == 0 );
      _Assert( new_scheduler_node->sticky_level == 0 );

      _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
      _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
      _Chain_Initialize_one(
        &the_thread->Scheduler.Wait_nodes,
        &new_scheduler_node->Thread.Wait_node
      );
      _Chain_Extract_unprotected(
        &old_scheduler_node->Thread.Scheduler_node.Chain
      );
      _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
      _Chain_Initialize_one(
        &the_thread->Scheduler.Scheduler_nodes,
        &new_scheduler_node->Thread.Scheduler_node.Chain
      );

      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      the_thread->Scheduler.own_node = new_scheduler_node;
      the_thread->Scheduler.node = new_scheduler_node;
      _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */