source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 8388ceea

Last change on this file was 8388ceea, checked in by Sebastian Huber <sebastian.huber@…> on 10/24/17 at 04:31:03

score: Delete _Scheduler_Thread_set_priority()

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2017 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}
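
/*
 * A minimal usage sketch of the lock protocol above (illustrative only, not
 * part of this file): the lock context must first disable interrupts, then
 * the scheduler instance lock brackets the scheduler operation, as in
 * _Scheduler_Schedule() below.
 *
 * @code
 * ISR_lock_Context lock_context;
 *
 * _ISR_lock_ISR_disable( &lock_context );
 * _Scheduler_Acquire_critical( scheduler, &lock_context );
 * ( *scheduler->Operations.schedule )( scheduler, the_thread );
 * _Scheduler_Release_critical( scheduler, &lock_context );
 * _ISR_lock_ISR_enable( &lock_context );
 * @endcode
 */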

#if defined(RTEMS_SMP)
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );

/**
 * @brief Registers an ask for help request if necessary.
 *
 * The actual ask for help operation is carried out during
 * _Thread_Do_dispatch() on a processor related to the thread.  This yields a
 * better separation of scheduler instances.  A thread of one scheduler
 * instance should not be forced to carry out too much work for threads on
 * other scheduler instances.
 *
 * @param[in] the_thread The thread in need of help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
{
  _Assert( _Thread_State_is_owner( the_thread ) );

  if ( the_thread->Scheduler.helping_nodes > 0 ) {
    _Scheduler_Request_ask_for_help( the_thread );
  }
}
#endif

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely only on the ready queue can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler.  It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler.  The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uniprocessor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
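
/*
 * A minimal sketch of the intended call sequence (illustrative only;
 * new_priority is an assumption for the example): store the new priority in
 * the home scheduler node first, then let the scheduler re-evaluate its
 * decision.  Real callers go through the thread priority machinery in
 * threadimpl.h.
 *
 * @code
 * _Scheduler_Node_set_priority(
 *   _Thread_Scheduler_get_home_node( the_thread ),
 *   new_priority,
 *   false
 * );
 * _Scheduler_Update_priority( the_thread );
 * @endcode
 */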

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
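
/*
 * A minimal sketch of the bijectivity contract stated above (illustrative
 * only): mapping a priority into the scheduler domain and back must yield
 * the original value for all priorities in [0, M].
 *
 * @code
 * Priority_Control p;
 * Priority_Control q;
 *
 * p = 42;
 * q = _Scheduler_Unmap_priority(
 *   scheduler,
 *   _Scheduler_Map_priority( scheduler, p )
 * );
 * _Assert( p == q );
 * @endcode
 */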

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
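
/*
 * A minimal sketch of the node lifecycle contract (illustrative only; the
 * scheduler, node, thread, and priority values are assumptions for the
 * example): each _Scheduler_Node_initialize() must be paired with a
 * _Scheduler_Node_destroy() before the node memory is reused.
 *
 * @code
 * _Scheduler_Node_initialize( scheduler, node, the_thread, priority );
 * _Scheduler_Node_destroy( scheduler, node );
 * @endcode
 */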

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers
 * which support standard RTEMS features, this includes
 * time-slicing management.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}
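
/*
 * A minimal sketch of a clock tick handler driving this method (illustrative
 * only): the tick is delivered to the scheduler instance owning the current
 * processor.
 *
 * @code
 * Per_CPU_Control *cpu;
 *
 * cpu = _Per_CPU_Get();
 * _Scheduler_Tick( cpu );
 * @endcode
 */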

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t                 cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control   *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return &_Scheduler_Get_context( scheduler )->Processors;
#else
  return &_Processor_mask_The_one_and_only;
#endif
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  (void) scheduler;
  (void) the_thread;
  (void) node;
  return _Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() );
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Processor_mask_Count( &context->Processors );
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
  Objects_Id id
)
{
  uint32_t index;

  index = _Scheduler_Get_index_by_id( id );

  if ( index >= _Scheduler_Count ) {
    return NULL;
  }

  return &_Scheduler_Table[ index ];
}
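
/*
 * A minimal sketch of the identifier round trip (illustrative only): an
 * identifier built for a valid scheduler index resolves back to the
 * corresponding scheduler table entry, while out of range identifiers yield
 * NULL.
 *
 * @code
 * Objects_Id               id;
 * const Scheduler_Control *scheduler;
 *
 * id = _Scheduler_Build_id( 0 );
 * scheduler = _Scheduler_Get_by_id( id );
 * _Assert( scheduler == &_Scheduler_Table[ 0 ] );
 * @endcode
 */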

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) ( scheduler - &_Scheduler_Table[ 0 ] );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Uses an idle thread for this scheduler node.
 *
 * A thread whose home scheduler node has a sticky level greater than zero may
 * use an idle thread in the home scheduler instance in case it currently
 * executes in another scheduler instance or in case it is in a blocking
 * state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Tries to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE This node can be scheduled.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE The provided idle thread
 *   shall be exchanged with the idle thread of this node.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK This node cannot be scheduled;
 *   continue with the blocking operation.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}
/**
 * @brief Releases an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Blocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;
#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  if ( !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler = _Thread_Scheduler_get_home( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  if (
    _Scheduler_Get_processor_count( new_scheduler ) == 0
      || !( *new_scheduler->Operations.set_affinity )(
        new_scheduler,
        the_thread,
        new_scheduler_node,
        &the_thread->Scheduler.Affinity
      )
  ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  the_thread->Scheduler.home = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
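
/*
 * A minimal sketch of the expected error handling (illustrative only; the
 * identifier id is an assumption for the example): resolve the scheduler
 * identifier first, then propagate the status of the set operation.
 *
 * @code
 * const Scheduler_Control *scheduler;
 * Status_Control           status;
 *
 * scheduler = _Scheduler_Get_by_id( id );
 *
 * if ( scheduler != NULL ) {
 *   status = _Scheduler_Set( scheduler, the_thread, priority );
 * } else {
 *   status = STATUS_INVALID_ID;
 * }
 * @endcode
 */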

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */