source: rtems/cpukit/include/rtems/score/schedulerimpl.h @ e2d575c2

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2017 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup RTEMSScoreScheduler
 */
/**@{**/

/**
 * @brief Maps a priority value to support the append indicator.
 */
#define SCHEDULER_PRIORITY_MAP( priority ) ( ( priority ) << 1 )

/**
 * @brief Returns the plain priority value.
 */
#define SCHEDULER_PRIORITY_UNMAP( priority ) ( ( priority ) >> 1 )

/**
 * @brief Clears the priority append indicator bit.
 */
#define SCHEDULER_PRIORITY_PURIFY( priority )  \
  ( ( priority ) & ~( (Priority_Control) SCHEDULER_PRIORITY_APPEND_FLAG ) )

/**
 * @brief Returns the priority control with the append indicator bit set.
 */
#define SCHEDULER_PRIORITY_APPEND( priority )  \
  ( ( priority ) | SCHEDULER_PRIORITY_APPEND_FLAG )

/**
 * @brief Returns true if the item should be appended to its priority group,
 * otherwise returns false and the item should be prepended to its priority
 * group.
 */
#define SCHEDULER_PRIORITY_IS_APPEND( priority ) \
  ( ( ( priority ) & SCHEDULER_PRIORITY_APPEND_FLAG ) != 0 )

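/*
 * Illustrative sketch (not part of the original file): how the priority
 * mapping macros above compose.  SCHEDULER_PRIORITY_MAP() shifts a plain
 * priority left by one bit to make room for the append indicator in the
 * least significant bit; SCHEDULER_PRIORITY_UNMAP() reverses this.
 *
 * @code
 * Priority_Control plain = 10;
 * Priority_Control mapped = SCHEDULER_PRIORITY_MAP( plain );
 * Priority_Control append = SCHEDULER_PRIORITY_APPEND( mapped );
 *
 * _Assert( SCHEDULER_PRIORITY_IS_APPEND( append ) );
 * _Assert( SCHEDULER_PRIORITY_PURIFY( append ) == mapped );
 * _Assert( SCHEDULER_PRIORITY_UNMAP( mapped ) == plain );
 * @endcode
 */
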
/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

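/*
 * Illustrative sketch (not part of the original file): the bracket pattern
 * used by the inline routines below to call a scheduler operation.  The
 * caller must already execute inside a critical section with interrupts
 * disabled.
 *
 * @code
 * ISR_lock_Context lock_context;
 *
 * _Scheduler_Acquire_critical( scheduler, &lock_context );
 * ( *scheduler->Operations.schedule )( scheduler, the_thread );
 * _Scheduler_Release_critical( scheduler, &lock_context );
 * @endcode
 */
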
#if defined(RTEMS_SMP)
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );

/**
 * @brief Registers an ask for help request if necessary.
 *
 * The actual ask for help operation is carried out during
 * _Thread_Do_dispatch() on a processor related to the thread.  This yields a
 * better separation of scheduler instances.  A thread of one scheduler
 * instance should not be forced to carry out too much work for threads on
 * other scheduler instances.
 *
 * @param[in] the_thread The thread in need of help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
{
  _Assert( _Thread_State_is_owner( the_thread ) );

  if ( the_thread->Scheduler.helping_nodes > 0 ) {
    _Scheduler_Request_ask_for_help( the_thread );
  }
}
#endif

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

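/*
 * Usage sketch (a hypothetical caller, not from this file): a yield
 * directive would typically disable thread dispatching and interrupts,
 * call _Scheduler_Yield() for the executing thread, and then re-enable
 * both so a potential new heir can run.  The actual directives perform
 * additional state checks.
 *
 * @code
 * Per_CPU_Control *cpu_self;
 * ISR_Level        level;
 *
 * cpu_self = _Thread_Dispatch_disable();
 * _ISR_Local_disable( level );
 * _Scheduler_Yield( _Per_CPU_Get_executing( cpu_self ) );
 * _ISR_Local_enable( level );
 * _Thread_Dispatch_enable( cpu_self );
 * @endcode
 */
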
/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

#if defined(RTEMS_SMP)
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
    _Chain_First( &the_thread->Scheduler.Scheduler_nodes )
  );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
#else
  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  scheduler = _Thread_Scheduler_get_home( the_thread );
#endif

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.unblock )( scheduler, the_thread, scheduler_node );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  If the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}

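/*
 * Illustrative sketch (not part of the original file): the bijectivity
 * contract stated above can be expressed as a round-trip check for a given
 * scheduler instance.
 *
 * @code
 * Priority_Control p_user = 5;
 * Priority_Control p_scheduler;
 *
 * p_scheduler = _Scheduler_Map_priority( scheduler, p_user );
 * _Assert( _Scheduler_Unmap_priority( scheduler, p_scheduler ) == p_user );
 * @endcode
 */
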
/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() and before the memory of the scheduler node is
 * reused.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}

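/*
 * Lifecycle sketch (not part of the original file): every initialized
 * scheduler node is destroyed exactly once before its memory is reused.
 *
 * @code
 * _Scheduler_Node_initialize( scheduler, node, the_thread, priority );
 * // ... the node may now be used in scheduler operations ...
 * _Scheduler_Node_destroy( scheduler, node );
 * @endcode
 */
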
/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers which
 * support standard RTEMS features, this includes time-slicing management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

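/*
 * Usage sketch (a hypothetical caller, not from this file): the clock tick
 * service would invoke the tick operation for the current processor, for
 * example from the clock interrupt handler.
 *
 * @code
 * _Scheduler_Tick( _Per_CPU_Get() );
 * @endcode
 */
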
/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t                 cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control   *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return &_Scheduler_Get_context( scheduler )->Processors;
#else
  return &_Processor_mask_The_one_and_only;
#endif
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  (void) scheduler;
  (void) the_thread;
  (void) node;
  return _Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() );
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

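/*
 * Usage sketch (assumed names, not from this file): a scheduler
 * implementation can forward its block operation to
 * _Scheduler_Generic_block() and supply its own extract and schedule
 * helpers.
 *
 * @code
 * static void _My_scheduler_Block(
 *   const Scheduler_Control *scheduler,
 *   Thread_Control          *the_thread,
 *   Scheduler_Node          *node
 * )
 * {
 *   _Scheduler_Generic_block(
 *     scheduler,
 *     the_thread,
 *     node,
 *     _My_scheduler_Extract,       // hypothetical extract helper
 *     _My_scheduler_Schedule_body  // hypothetical schedule helper
 *   );
 * }
 * @endcode
 */
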
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Processor_mask_Count( &context->Processors );
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
  Objects_Id id
)
{
  uint32_t index;

  index = _Scheduler_Get_index_by_id( id );

  if ( index >= _Scheduler_Count ) {
    return NULL;
  }

  return &_Scheduler_Table[ index ];
}

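/*
 * Illustrative sketch (not part of the original file): scheduler object
 * identifiers and scheduler table indices round-trip through the helpers
 * above, assuming at least one configured scheduler.
 *
 * @code
 * Objects_Id id = _Scheduler_Build_id( 0 );
 *
 * _Assert( _Scheduler_Get_index_by_id( id ) == 0 );
 * _Assert( _Scheduler_Get_by_id( id ) == &_Scheduler_Table[ 0 ] );
 * @endcode
 */
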
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Uses an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero may
 * use an idle thread in the home scheduler instance in case it currently
 * executes in another scheduler instance or is in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Tries to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE This node can be scheduled.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE This node can be
 *   scheduled and shall use the idle thread provided by the victim node.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK This node cannot be scheduled.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}

/**
 * @brief Releases an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Blocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

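/*
 * Usage sketch (a hypothetical caller, not from this file): a scheduler
 * implementation selects the highest priority ready thread and lets
 * _Scheduler_Update_heir() decide whether a dispatch is necessary, which
 * honours the preemptibility of the current heir unless a dispatch is
 * forced.
 *
 * @code
 * Thread_Control *highest_ready;
 *
 * highest_ready = _My_scheduler_Get_highest_ready();  // hypothetical helper
 * _Scheduler_Update_heir( highest_ready, force_dispatch );
 * @endcode
 */
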
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;

#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if (
    !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
#if defined(RTEMS_SMP)
      || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
      || the_thread->Scheduler.pin_level != 0
#endif
  ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  old_scheduler = _Thread_Scheduler_get_home( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  if (
    _Scheduler_Get_processor_count( new_scheduler ) == 0
      || !( *new_scheduler->Operations.set_affinity )(
        new_scheduler,
        the_thread,
        new_scheduler_node,
        &the_thread->Scheduler.Affinity
      )
  ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
  the_thread->Scheduler.home_scheduler = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}

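/*
 * Usage sketch (a hypothetical caller, not from this file): a directive
 * such as rtems_task_set_scheduler() may call _Scheduler_Set() with the
 * new scheduler and priority and map the returned status to a directive
 * status code.
 *
 * @code
 * Status_Control status;
 *
 * status = _Scheduler_Set( new_scheduler, the_thread, priority );
 *
 * if ( status != STATUS_SUCCESSFUL ) {
 *   // e.g. STATUS_RESOURCE_IN_USE if the thread waits on a thread queue
 * }
 * @endcode
 */
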
/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */