source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 197a614

Last change on this file was 197a614, checked in by Sebastian Huber <sebastian.huber@…> on 07/05/17 at 13:14:26

score: Add scheduler node to set affinity op

Update #3059.

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2017 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

#if defined(RTEMS_SMP)
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );

/**
 * @brief Registers an ask for help request if necessary.
 *
 * The actual ask for help operation is carried out during
 * _Thread_Do_dispatch() on a processor related to the thread.  This yields a
 * better separation of scheduler instances.  A thread of one scheduler
 * instance should not be forced to carry out too much work for threads on
 * other scheduler instances.
 *
 * @param[in] the_thread The thread in need of help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
{
  _Assert( _Thread_State_is_owner( the_thread ) );

  if ( the_thread->Scheduler.helping_nodes > 0 ) {
    _Scheduler_Request_ask_for_help( the_thread );
  }
}
#endif

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

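/*
 * Illustrative sketch (not part of the original header): in the SMP variant
 * above, the home scheduler node is the first node on the thread's
 * Scheduler_nodes chain and receives the block operation, while every
 * further (helping) node is withdrawn.  The standalone model below uses
 * hypothetical Demo_* types to show the same first-node-special traversal
 * over an intrusive list.
 */
#if 0
#include <stdio.h>

typedef struct Demo_Node {
  struct Demo_Node *next;
  const char       *scheduler_name;
} Demo_Node;

static void demo_block( const Demo_Node *node )
{
  printf( "block on %s\n", node->scheduler_name );
}

static void demo_withdraw( const Demo_Node *node )
{
  printf( "withdraw from %s\n", node->scheduler_name );
}

int main( void )
{
  Demo_Node        c = { NULL, "helping scheduler B" };
  Demo_Node        b = { &c, "helping scheduler A" };
  Demo_Node        a = { &b, "home scheduler" };
  const Demo_Node *node = &a;

  demo_block( node );  /* the home node gets the block operation */

  for ( node = node->next; node != NULL; node = node->next ) {
    demo_withdraw( node );  /* helping nodes are withdrawn */
  }

  return 0;
}
#endif
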
/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}

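/*
 * Illustrative sketch (not part of the original header): the map/unmap pair
 * must be bijective on [0, M].  The standalone model below uses a
 * hypothetical mapping that reverses the priority order (a plausible scheme
 * when the hardware favors high numeric values) and checks the round-trip
 * invariant stated in the documentation above.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_MAXIMUM_PRIORITY 255u

static uint64_t demo_map_priority( uint64_t priority )
{
  return DEMO_MAXIMUM_PRIORITY - priority;  /* invert the order */
}

static uint64_t demo_unmap_priority( uint64_t priority )
{
  return DEMO_MAXIMUM_PRIORITY - priority;  /* inversion is self-inverse */
}

int main( void )
{
  uint64_t p;

  /* demo_unmap_priority( demo_map_priority( p ) ) == p for all p in [0, M] */
  for ( p = 0; p <= DEMO_MAXIMUM_PRIORITY; ++p ) {
    assert( demo_unmap_priority( demo_map_priority( p ) ) == p );
  }

  return 0;
}
#endif
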
/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers
 * which support standard RTEMS features, this includes
 * time-slicing management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

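/*
 * Illustrative sketch (not part of the original header): a tick operation
 * typically charges the executing thread one tick of its time-slice budget
 * and forces a yield when the budget is exhausted.  The standalone model
 * below uses hypothetical names to show that accounting; the real tick
 * operations live in the individual scheduler implementations.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  unsigned budget_remaining;  /* ticks left in the current time slice */
  unsigned budget_per_slice;  /* ticks granted per time slice */
} Demo_Thread;

/* Returns true if the thread must yield the processor. */
static bool demo_tick( Demo_Thread *thread )
{
  if ( --thread->budget_remaining == 0 ) {
    thread->budget_remaining = thread->budget_per_slice;  /* recharge */
    return true;
  }

  return false;
}

int main( void )
{
  Demo_Thread thread = { 3, 3 };
  unsigned    tick;

  for ( tick = 1; tick <= 7; ++tick ) {
    if ( demo_tick( &thread ) ) {
      printf( "tick %u: yield\n", tick );  /* fires at ticks 3 and 6 */
    }
  }

  return 0;
}
#endif
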
/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t                 cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control   *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return &_Scheduler_Get_context( scheduler )->Processors;
#else
  return &_Processor_mask_The_one_and_only;
#endif
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  (void) scheduler;
  (void) the_thread;
  (void) node;
  return _Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() );
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

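/*
 * Illustrative sketch (not part of the original header): a concrete
 * uniprocessor scheduler can reuse _Scheduler_Generic_block() by passing its
 * own extract and schedule operations.  The _Demo_Scheduler_* names below
 * are hypothetical; they mirror how a block operation is commonly composed
 * from these two callbacks.
 */
#if 0
static void _Demo_Scheduler_Extract(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  (void) scheduler;
  (void) the_thread;
  (void) node;
  /* A real scheduler removes the thread from its ready queue here. */
}

static void _Demo_Scheduler_Schedule(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  bool                     force_dispatch
)
{
  (void) scheduler;
  (void) the_thread;
  (void) force_dispatch;
  /* A real scheduler selects a new heir thread here. */
}

static void _Demo_Scheduler_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  _Scheduler_Generic_block(
    scheduler,
    the_thread,
    node,
    _Demo_Scheduler_Extract,
    _Demo_Scheduler_Schedule
  );
}
#endif
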
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Processor_mask_Count( &context->Processors );
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
  Objects_Id id
)
{
  uint32_t index;

  index = _Scheduler_Get_index_by_id( id );

  if ( index >= _Scheduler_Count ) {
    return NULL;
  }

  return &_Scheduler_Table[ index ];
}

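/*
 * Illustrative sketch (not part of the original header): scheduler
 * identifiers are object identifiers built from the scheduler table index,
 * so index and identifier convert losslessly in both directions.  The
 * standalone model below replaces the real object identifier encoding with
 * a hypothetical base constant to show that round trip.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_ID_BASE 0x0f010001u  /* stands in for _Scheduler_Build_id( 0 ) */

static uint32_t demo_build_id( uint32_t scheduler_index )
{
  return DEMO_ID_BASE + scheduler_index;
}

static uint32_t demo_get_index_by_id( uint32_t id )
{
  return id - demo_build_id( 0 );  /* mirrors _Scheduler_Get_index_by_id() */
}

int main( void )
{
  uint32_t index;

  for ( index = 0; index < 32; ++index ) {
    assert( demo_get_index_by_id( demo_build_id( index ) ) == index );
  }

  return 0;
}
#endif
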
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero may
 * use an idle thread in the home scheduler instance in case it currently
 * executes in another scheduler instance or in case it is in a blocking
 * state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The action indicating how to proceed: schedule the node, exchange
 *   idle threads, or block.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}

/**
 * @brief Release an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

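/*
 * Illustrative sketch (not part of the original header):
 * _Scheduler_Block_node() and _Scheduler_Unblock_node() keep the node's
 * sticky level balanced.  The blocking operation proceeds only once the
 * level drops to zero; while it stays positive, an idle thread keeps the
 * node scheduled.  A standalone model of the counter behavior, with
 * hypothetical Demo_* names:
 */
#if 0
#include <assert.h>
#include <stdbool.h>

typedef struct {
  int sticky_level;
} Demo_Node;

/* Returns true if the blocking operation must continue. */
static bool demo_block_node( Demo_Node *node )
{
  --node->sticky_level;
  assert( node->sticky_level >= 0 );
  return node->sticky_level == 0;
}

static void demo_unblock_node( Demo_Node *node )
{
  ++node->sticky_level;
  assert( node->sticky_level > 0 );
}

int main( void )
{
  Demo_Node node = { 2 };  /* e.g. executing while owning a sticky resource */

  assert( !demo_block_node( &node ) );  /* idle thread keeps node scheduled */
  assert( demo_block_node( &node ) );   /* now the block proceeds */
  demo_unblock_node( &node );
  return 0;
}
#endif
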
RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

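/*
 * Illustrative sketch (not part of the original header): the heir only
 * changes when a different thread is proposed and the current heir is
 * preemptible or the dispatch is forced.  A standalone model of that
 * decision, with hypothetical names and plain integers as thread stand-ins:
 */
#if 0
#include <assert.h>
#include <stdbool.h>

static bool demo_must_dispatch(
  int  heir,
  int  new_heir,
  bool heir_is_preemptible,
  bool force_dispatch
)
{
  return heir != new_heir && ( heir_is_preemptible || force_dispatch );
}

int main( void )
{
  assert( !demo_must_dispatch( 1, 1, true, true ) );   /* same heir */
  assert( demo_must_dispatch( 1, 2, true, false ) );   /* preemptible */
  assert( !demo_must_dispatch( 1, 2, false, false ) ); /* not preemptible */
  assert( demo_must_dispatch( 1, 2, false, true ) );   /* forced */
  return 0;
}
#endif
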
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;

#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  if ( !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler = _Thread_Scheduler_get_home( the_thread );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  if ( _Scheduler_Get_processor_count( new_scheduler ) == 0 ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  the_thread->Scheduler.home = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}

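/*
 * Illustrative sketch (not part of the original header): _Scheduler_Set() is
 * the core of the Classic API directive rtems_task_set_scheduler().  An
 * application-level use could look like the fragment below; SCHED_NAME and
 * task_id are hypothetical, and the three-argument directive form with a
 * priority parameter is assumed.
 */
#if 0
rtems_id          scheduler_id;
rtems_status_code sc;

sc = rtems_scheduler_ident( SCHED_NAME, &scheduler_id );
if ( sc == RTEMS_SUCCESSFUL ) {
  /* Move the task to the selected scheduler with priority 10. */
  sc = rtems_task_set_scheduler( task_id, scheduler_id, 10 );
}
#endif
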
/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */