source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ b2dbb634

Last change on this file since b2dbb634 was b2dbb634, checked in by Sebastian Huber <sebastian.huber@…>, on 10/10/17 at 09:36:23

score: Remove CPU_set_Control

Use Processor_mask instead.

Update #2514.

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2017 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

#if defined(RTEMS_SMP)
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );

/**
 * @brief Registers an ask for help request if necessary.
 *
 * The actual ask for help operation is carried out during
 * _Thread_Do_dispatch() on a processor related to the thread.  This yields a
 * better separation of scheduler instances.  A thread of one scheduler
 * instance should not be forced to carry out too much work for threads on
 * other scheduler instances.
 *
 * @param[in] the_thread The thread in need of help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
{
  _Assert( _Thread_State_is_owner( the_thread ) );

  if ( the_thread->Scheduler.helping_nodes > 0 ) {
    _Scheduler_Request_ask_for_help( the_thread );
  }
}
#endif

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
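
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a typical caller first records the new priority in the home scheduler node
 * and then asks the scheduler to re-evaluate the thread, roughly
 *
 *   _Scheduler_Thread_set_priority( the_thread, new_priority, false );
 *   _Scheduler_Update_priority( the_thread );
 *
 * Both helpers are defined in this file; real callers are presumed to hold
 * the appropriate thread lock around this sequence.
 */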

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
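
/*
 * Illustrative sketch (editor's addition): the bijectivity requirement above
 * means the two operations invert each other on [0, M], so for any priority
 * p in that interval the following hypothetical assertion would hold:
 *
 *   Priority_Control p_in_scheduler_domain;
 *
 *   p_in_scheduler_domain = _Scheduler_Map_priority( scheduler, p );
 *   _Assert( _Scheduler_Unmap_priority( scheduler, p_in_scheduler_domain ) == p );
 */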

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers which
 * support standard RTEMS features, this includes time-slicing management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}
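
/*
 * Illustrative sketch (editor's addition): the clock tick service routine is
 * expected to invoke this method for the current processor, presumably along
 * the lines of
 *
 *   _Scheduler_Tick( _Per_CPU_Get() );
 *
 * On SMP configurations each processor makes this call for itself, since the
 * assigned scheduler and the executing thread are both per-processor state.
 */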

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t                 cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control   *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return &_Scheduler_Get_context( scheduler )->Processors;
#else
  return &_Processor_mask_The_one_and_only;
#endif
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  (void) scheduler;
  (void) the_thread;
  (void) node;
  return _Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() );
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Processor_mask_Count( &context->Processors );
#else
  (void) scheduler;

  return 1;
#endif
}
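
/*
 * Illustrative sketch (editor's addition): on SMP configurations the count is
 * derived from the owned processor set, so the two accessors above agree:
 *
 *   _Assert(
 *     _Processor_mask_Count( _Scheduler_Get_processors( scheduler ) )
 *       == _Scheduler_Get_processor_count( scheduler )
 *   );
 */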

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
  Objects_Id id
)
{
  uint32_t index;

  index = _Scheduler_Get_index_by_id( id );

  if ( index >= _Scheduler_Count ) {
    return NULL;
  }

  return &_Scheduler_Table[ index ];
}
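
/*
 * Illustrative sketch (editor's addition): the identifier and index
 * conversions invert each other, so, provided at least one scheduler is
 * configured, looking up a freshly built identifier yields the corresponding
 * table entry:
 *
 *   Objects_Id id;
 *
 *   id = _Scheduler_Build_id( 0 );
 *   _Assert( _Scheduler_Get_index_by_id( id ) == 0 );
 *   _Assert( _Scheduler_Get_by_id( id ) == &_Scheduler_Table[ 0 ] );
 */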

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Uses an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero may
 * use an idle thread in the home scheduler instance in case it currently
 * executes in another scheduler instance or in case it is in a blocking
 * state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Tries to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The Scheduler_Try_to_schedule_action which shall be carried out for
 *   this node.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}

/**
 * @brief Releases an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;

#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  if ( !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler = _Thread_Scheduler_get_home( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  if (
    _Scheduler_Get_processor_count( new_scheduler ) == 0
      || !( *new_scheduler->Operations.set_affinity )(
        new_scheduler,
        the_thread,
        new_scheduler_node,
        &the_thread->Scheduler.Affinity
      )
  ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  the_thread->Scheduler.home = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
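
/*
 * Illustrative sketch (editor's addition): a hypothetical caller of
 * _Scheduler_Set() would look roughly like
 *
 *   Status_Control status;
 *
 *   status = _Scheduler_Set( new_scheduler, the_thread, priority );
 *   if ( status == STATUS_RESOURCE_IN_USE ) {
 *     ... the thread waits on a queue or holds resources and cannot move ...
 *   }
 *
 * In the Classic API this operation is presumed to back
 * rtems_task_set_scheduler(), which maps the status to an rtems_status_code.
 */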

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */