source: rtems/cpukit/include/rtems/score/schedulerimpl.h @ 11e7893

5
Last change on this file since 11e7893 was 11e7893, checked in by Andreas Dachsberger <andreas.dachsberger@…>, on 04/12/19 at 09:55:49

doxygen: score: adjust doc in schedulerimpl.h to doxygen guidelines

Update #3706.

  • Property mode set to 100644
File size: 38.4 KB
RevLine 
[1f0d013]1/**
2 * @file
3 *
[11e7893]4 * @ingroup RTEMSScoreScheduler
5 *
[1f0d013]6 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
[0faa9dad]7 *
[1f0d013]8 * This inline file contains all of the inlined routines associated with
9 * the manipulation of the scheduler.
[0faa9dad]10 */
11
12/*
13 *  Copyright (C) 2010 Gedare Bloom.
[010192d]14 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
[088acbb0]15 *  Copyright (c) 2014, 2017 embedded brains GmbH
[0faa9dad]16 *
17 *  The license and distribution terms for this file may be
18 *  found in the file LICENSE in this distribution or at
[c499856]19 *  http://www.rtems.org/license/LICENSE.
[0faa9dad]20 */
21
[c6e21ee1]22#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
23#define _RTEMS_SCORE_SCHEDULERIMPL_H
24
25#include <rtems/score/scheduler.h>
[a7a8ec03]26#include <rtems/score/assert.h>
[300f6a48]27#include <rtems/score/priorityimpl.h>
[c5831a3f]28#include <rtems/score/smpimpl.h>
[c0bd006]29#include <rtems/score/status.h>
[e5ca54c9]30#include <rtems/score/threadimpl.h>
[0faa9dad]31
[c6e21ee1]32#ifdef __cplusplus
33extern "C" {
34#endif
[0faa9dad]35
36/**
[4c20da4b]37 * @addtogroup RTEMSScoreScheduler
[11e7893]38 *
39 * @{
[0faa9dad]40 */
41
/**
 * @brief Maps a priority value to support the append indicator.
 *
 * The user priority is shifted up by one bit to make room for the append
 * indicator flag in the scheduler domain priority value.
 */
#define SCHEDULER_PRIORITY_MAP( priority ) ( ( priority ) << 1 )

/**
 * @brief Returns the plain priority value.
 *
 * Inverse of SCHEDULER_PRIORITY_MAP(): shifts the append indicator bit out.
 */
#define SCHEDULER_PRIORITY_UNMAP( priority ) ( ( priority ) >> 1 )

/**
 * @brief Clears the priority append indicator bit.
 */
#define SCHEDULER_PRIORITY_PURIFY( priority )  \
  ( ( priority ) & ~( (Priority_Control) SCHEDULER_PRIORITY_APPEND_FLAG ) )

/**
 * @brief Returns the priority control with the append indicator bit set.
 */
#define SCHEDULER_PRIORITY_APPEND( priority )  \
  ( ( priority ) | SCHEDULER_PRIORITY_APPEND_FLAG )

/**
 * @brief Returns true if the item should be appended to its priority group,
 * otherwise returns false and the item should be prepended to its priority
 * group.
 */
#define SCHEDULER_PRIORITY_IS_APPEND( priority ) \
  ( ( ( priority ) & SCHEDULER_PRIORITY_APPEND_FLAG ) != 0 )
71
[c6e21ee1]72/**
[11e7893]73 * @brief Initializes the scheduler to the policy chosen by the user.
[c6e21ee1]74 *
[11e7893]75 * This routine initializes the scheduler to the policy chosen by the user
76 * through confdefs, or to the priority scheduler with ready chains by
77 * default.
[c6e21ee1]78 */
79void _Scheduler_Handler_initialization( void );
80
/**
 * @brief Gets the context of the scheduler.
 *
 * The context carries the per-instance scheduler state, e.g. the lock used
 * by _Scheduler_Acquire_critical().
 *
 * @param scheduler The scheduler to get the context of.
 *
 * @return The context of @a scheduler.
 */
RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}
94
/**
 * @brief Gets the scheduler for the cpu.
 *
 * On uni-processor configurations there is exactly one scheduler instance,
 * the first entry of the scheduler table.
 *
 * @param cpu The cpu control to get the scheduler of.
 *
 * @return The scheduler for the cpu.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}
113
[bd12dda]114/**
115 * @brief Acquires the scheduler instance inside a critical section (interrupts
116 * disabled).
117 *
[11e7893]118 * @param scheduler The scheduler instance.
119 * @param lock_context The lock context to use for
[bd12dda]120 *   _Scheduler_Release_critical().
121 */
122RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
123  const Scheduler_Control *scheduler,
124  ISR_lock_Context        *lock_context
125)
126{
[913864c]127#if defined(RTEMS_SMP)
128  Scheduler_Context *context;
129
130  context = _Scheduler_Get_context( scheduler );
131  _ISR_lock_Acquire( &context->Lock, lock_context );
132#else
[bd12dda]133  (void) scheduler;
[913864c]134  (void) lock_context;
135#endif
[bd12dda]136}
137
138/**
139 * @brief Releases the scheduler instance inside a critical section (interrupts
140 * disabled).
141 *
[11e7893]142 * @param scheduler The scheduler instance.
143 * @param lock_context The lock context used for
[bd12dda]144 *   _Scheduler_Acquire_critical().
145 */
146RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
147  const Scheduler_Control *scheduler,
148  ISR_lock_Context        *lock_context
149)
150{
[913864c]151#if defined(RTEMS_SMP)
152  Scheduler_Context *context;
153
154  context = _Scheduler_Get_context( scheduler );
155  _ISR_lock_Release( &context->Lock, lock_context );
156#else
[bd12dda]157  (void) scheduler;
[913864c]158  (void) lock_context;
159#endif
[bd12dda]160}
161
[ca1e546e]162#if defined(RTEMS_SMP)
[088acbb0]163void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );
164
[ca1e546e]165/**
[088acbb0]166 * @brief Registers an ask for help request if necessary.
[ca1e546e]167 *
168 * The actual ask for help operation is carried out during
169 * _Thread_Do_dispatch() on a processor related to the thread.  This yields a
170 * better separation of scheduler instances.  A thread of one scheduler
171 * instance should not be forced to carry out too much work for threads on
172 * other scheduler instances.
173 *
[11e7893]174 * @param the_thread The thread in need for help.
[ca1e546e]175 */
176RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
177{
178  _Assert( _Thread_State_is_owner( the_thread ) );
179
180  if ( the_thread->Scheduler.helping_nodes > 0 ) {
[088acbb0]181    _Scheduler_Request_ask_for_help( the_thread );
[ca1e546e]182  }
183}
184#endif
185
[0faa9dad]186/**
[1f0d013]187 * The preferred method to add a new scheduler is to define the jump table
188 * entries and add a case to the _Scheduler_Initialize routine.
[0faa9dad]189 *
[1f0d013]190 * Generic scheduling implementations that rely on the ready queue only can
[0faa9dad]191 * be found in the _Scheduler_queue_XXX functions.
192 */
193
[1f0d013]194/*
195 * Passing the Scheduler_Control* to these functions allows for multiple
196 * scheduler's to exist simultaneously, which could be useful on an SMP
197 * system.  Then remote Schedulers may be accessible.  How to protect such
[0faa9dad]198 * accesses remains an open problem.
199 */
200
[1f0d013]201/**
[92635cb]202 * @brief General scheduling decision.
[0faa9dad]203 *
[1f0d013]204 * This kernel routine implements the scheduling decision logic for
205 * the scheduler. It does NOT dispatch.
[e5ca54c9]206 *
[11e7893]207 * @param the_thread The thread which state changed previously.
[0faa9dad]208 */
[92635cb]209RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
[0faa9dad]210{
[bd12dda]211  const Scheduler_Control *scheduler;
212  ISR_lock_Context         lock_context;
213
[2dd098a]214  scheduler = _Thread_Scheduler_get_home( the_thread );
[bd12dda]215  _Scheduler_Acquire_critical( scheduler, &lock_context );
[92635cb]216
[24934e36]217  ( *scheduler->Operations.schedule )( scheduler, the_thread );
[bd12dda]218
219  _Scheduler_Release_critical( scheduler, &lock_context );
[0faa9dad]220}
221
[1f0d013]222/**
[6eba7c85]223 * @brief Scheduler yield with a particular thread.
[0faa9dad]224 *
[6eba7c85]225 * This routine is invoked when a thread wishes to voluntarily transfer control
226 * of the processor to another thread.
227 *
[11e7893]228 * @param the_thread The yielding thread.
[0faa9dad]229 */
[92635cb]230RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
[0faa9dad]231{
[bd12dda]232  const Scheduler_Control *scheduler;
233  ISR_lock_Context         lock_context;
[6a82f1ae]234
[2dd098a]235  scheduler = _Thread_Scheduler_get_home( the_thread );
[ca1e546e]236  _Scheduler_Acquire_critical( scheduler, &lock_context );
[2df4abc]237  ( *scheduler->Operations.yield )(
238    scheduler,
239    the_thread,
240    _Thread_Scheduler_get_home_node( the_thread )
241  );
[ca1e546e]242  _Scheduler_Release_critical( scheduler, &lock_context );
[0faa9dad]243}
244
/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  /*
   * The first scheduler node of the chain gets the real block operation;
   * presumably this is the home scheduler node — confirm against
   * _Thread_Scheduler_get_home_node() users.
   */
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  /*
   * All remaining scheduler nodes are withdrawn with the blocked thread
   * scheduler state; each instance is locked individually.
   */
  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
306
[1f0d013]307/**
[92635cb]308 * @brief Unblocks a thread with respect to the scheduler.
[0faa9dad]309 *
[9bfad8c]310 * This operation must fetch the latest thread priority value for this
311 * scheduler instance and update its internal state if necessary.
[92635cb]312 *
[11e7893]313 * @param the_thread The thread.
[9bfad8c]314 *
315 * @see _Scheduler_Node_get_priority().
[0faa9dad]316 */
[92635cb]317RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
[0faa9dad]318{
[d8bc0730]319  Scheduler_Node          *scheduler_node;
[bd12dda]320  const Scheduler_Control *scheduler;
321  ISR_lock_Context         lock_context;
[351c14d]322
[d8bc0730]323#if defined(RTEMS_SMP)
324  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
325    _Chain_First( &the_thread->Scheduler.Scheduler_nodes )
326  );
327  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
328#else
329  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
[2dd098a]330  scheduler = _Thread_Scheduler_get_home( the_thread );
[d8bc0730]331#endif
332
[ca1e546e]333  _Scheduler_Acquire_critical( scheduler, &lock_context );
[d8bc0730]334  ( *scheduler->Operations.unblock )( scheduler, the_thread, scheduler_node );
[ca1e546e]335  _Scheduler_Release_critical( scheduler, &lock_context );
[0faa9dad]336}
337
/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uni-processor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  /*
   * Walk every scheduler node of the thread; each scheduler instance is
   * locked individually while its update_priority operation runs.  The
   * do/while assumes the chain is non-empty.
   */
  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
392
#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param the_thread The thread changing its priority or sticky level.
 * @param sticky_level_change The signed amount to add to the sticky level of
 *   the home scheduler node.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  /* The first scheduler node of the chain receives the sticky level change. */
  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /* The sticky level must stay non-negative after the change. */
  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  /* Propagate the priority update to all remaining scheduler nodes. */
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif
451
/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], e.g. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param scheduler The scheduler instance; its map_priority operation
 *   carries out the mapping.
 * @param priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}
472
/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * Inverse of _Scheduler_Map_priority() within the valid priority interval.
 *
 * @param scheduler The scheduler instance; its unmap_priority operation
 *   carries out the mapping.
 * @param priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}
488
/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param scheduler The scheduler instance; its node_initialize operation
 *   performs the actual initialization.
 * @param[out] node The scheduler node to initialize.
 * @param the_thread The thread of the scheduler node to initialize.
 * @param priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}
516
/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param scheduler The scheduler instance; its node_destroy operation
 *   performs the actual destruction.
 * @param[out] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}
533
[ac9d2ecc]534/**
[92635cb]535 * @brief Releases a job of a thread with respect to the scheduler.
[ac9d2ecc]536 *
[11e7893]537 * @param the_thread The thread.
538 * @param priority_node The priority node of the job.
539 * @param deadline The deadline in watchdog ticks since boot.
540 * @param queue_context The thread queue context to provide the set of
[300f6a48]541 *   threads for _Thread_Priority_update().
[ac9d2ecc]542 */
[300f6a48]543RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
544  Thread_Control       *the_thread,
545  Priority_Node        *priority_node,
546  uint64_t              deadline,
547  Thread_queue_Context *queue_context
[ac9d2ecc]548)
549{
[2dd098a]550  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );
[92635cb]551
[300f6a48]552  _Thread_queue_Context_clear_priority_updates( queue_context );
553  ( *scheduler->Operations.release_job )(
[ee0e4135]554    scheduler,
555    the_thread,
[300f6a48]556    priority_node,
557    deadline,
558    queue_context
[ee0e4135]559  );
[ac9d2ecc]560}
561
[21bdca4]562/**
563 * @brief Cancels a job of a thread with respect to the scheduler.
564 *
[11e7893]565 * @param the_thread The thread.
566 * @param priority_node The priority node of the job.
567 * @param queue_context The thread queue context to provide the set of
[300f6a48]568 *   threads for _Thread_Priority_update().
[21bdca4]569 */
[300f6a48]570RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
571  Thread_Control       *the_thread,
572  Priority_Node        *priority_node,
573  Thread_queue_Context *queue_context
[21bdca4]574)
575{
[2dd098a]576  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );
[21bdca4]577
[300f6a48]578  _Thread_queue_Context_clear_priority_updates( queue_context );
579  ( *scheduler->Operations.cancel_job )(
580    scheduler,
581    the_thread,
582    priority_node,
583    queue_context
584  );
[21bdca4]585}
586
[1f0d013]587/**
588 * @brief Scheduler method invoked at each clock tick.
[3203e09]589 *
590 * This method is invoked at each clock tick to allow the scheduler
[1f0d013]591 * implementation to perform any activities required.  For the
[3203e09]592 * scheduler which support standard RTEMS features, this includes
593 * time-slicing management.
[11e7893]594 *
595 * @param cpu The cpu control for the operation.
[3203e09]596 */
[03b900d]597RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
[3203e09]598{
[03b900d]599  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
600  Thread_Control *executing = cpu->executing;
[c5831a3f]601
[03b900d]602  if ( scheduler != NULL && executing != NULL ) {
603    ( *scheduler->Operations.tick )( scheduler, executing );
[c5831a3f]604  }
[3203e09]605}
606
/**
 * @brief Starts the idle thread for a particular processor.
 *
 * The start is delegated to the start_idle operation of the scheduler.
 *
 * @param scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}
624
[11e7893]625/**
626 * @brief Checks if the scheduler of the cpu with the given index is equal
627 *      to the given scheduler.
628 *
629 * @param scheduler The scheduler for the comparison.
630 * @param cpu_index The index of the cpu for the comparison.
631 *
632 * @retval true The scheduler of the cpu is the given @a scheduler.
633 * @retval false The scheduler of the cpu is not the given @a scheduler.
634 */
[c5831a3f]635RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
636  const Scheduler_Control *scheduler,
[1c46b80]637  uint32_t                 cpu_index
[c5831a3f]638)
639{
640#if defined(RTEMS_SMP)
[1c46b80]641  const Per_CPU_Control   *cpu;
642  const Scheduler_Control *scheduler_of_cpu;
643
644  cpu = _Per_CPU_Get_by_index( cpu_index );
645  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );
[c5831a3f]646
[1c46b80]647  return scheduler_of_cpu == scheduler;
[c5831a3f]648#else
649  (void) scheduler;
650  (void) cpu_index;
651
652  return true;
653#endif
654}
655
/**
 * @brief Gets the processors of the scheduler
 *
 * On uni-processor configurations the mask of the one and only processor is
 * returned.
 *
 * @param scheduler The scheduler to get the processors of.
 *
 * @return The processors of the context of the given scheduler.
 */
RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return &_Scheduler_Get_context( scheduler )->Processors;
#else
  return &_Processor_mask_The_one_and_only;
#endif
}
673
[11e7893]674/**
675 * @brief Copies the thread's scheduler's affinity to the given cpuset.
676 *
677 * @param the_thread The thread to get the affinity of its scheduler.
678 * @param cpusetsize The size of @a cpuset.
679 * @param[out] cpuset The cpuset that serves as destination for the copy operation
680 *
681 * @retval true The copy operation was lossless.
682 * @retval false The copy operation was not lossless
683 */
[0712d17]684bool _Scheduler_Get_affinity(
[a92c488]685  Thread_Control *the_thread,
686  size_t          cpusetsize,
687  cpu_set_t      *cpuset
[0712d17]688);
689
/**
 * @brief Checks if the affinity is a subset of the online processors.
 *
 * Default set_affinity implementation: only affinity sets covering all of
 * the thread's processors are accepted, i.e. the affinity must contain the
 * online processors.
 *
 * @param scheduler This parameter is unused.
 * @param the_thread This parameter is unused.
 * @param node This parameter is unused.
 * @param affinity The processor mask to check.
 *
 * @retval true @a affinity is a subset of the online processors.
 * @retval false @a affinity is not a subset of the online processors.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  (void) scheduler;
  (void) the_thread;
  (void) node;
  return _Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() );
}
713
[11e7893]714/**
715 * @brief Sets the thread's scheduler's affinity.
716 *
717 * @param[in, out] the_thread The thread to set the affinity of.
718 * @param cpusetsize The size of @a cpuset.
719 * @param cpuset The cpuset to set the affinity.
720 *
721 * @retval true The operation succeeded.
722 * @retval false The operation did not succeed.
723 */
[0712d17]724bool _Scheduler_Set_affinity(
[e135271]725  Thread_Control  *the_thread,
726  size_t           cpusetsize,
727  const cpu_set_t *cpuset
[0712d17]728);
729
/**
 * @brief Blocks the thread.
 *
 * Generic block helper: first extracts the thread via @a extract, then, in
 * case the thread is currently executing or is the heir, invokes
 * @a schedule to select a new heir.
 *
 * @param scheduler The scheduler instance.
 * @param the_thread The thread to block.
 * @param node The corresponding scheduler node.
 * @param extract Method to extract the thread.
 * @param schedule Method for scheduling threads; the bool argument is passed
 *   as true (force dispatch).
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}
763
[11e7893]764/**
765 * @brief Gets the number of processors of the scheduler.
766 *
767 * @param scheduler The scheduler instance to get the number of processors of.
768 *
769 * @return The number of processors.
770 */
[e239760]771RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
772  const Scheduler_Control *scheduler
773)
774{
775#if defined(RTEMS_SMP)
[6b1d8c7]776  const Scheduler_Context *context = _Scheduler_Get_context( scheduler );
777
778  return _Processor_mask_Count( &context->Processors );
[e239760]779#else
780  (void) scheduler;
781
782  return 1;
783#endif
784}
785
/**
 * @brief Builds an object build id.
 *
 * The index is incremented by one so that scheduler index zero yields a
 * non-zero object index.
 *
 * @param scheduler_index The index to build the build id out of.
 *
 * @return The build id.
 */
RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}
802
[11e7893]803/**
804 * @brief Gets the scheduler index from the given object build id.
805 *
806 * @param id The object build id.
807 *
808 * @return The scheduler index.
809 */
[c8e83288]810RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
811{
812  uint32_t minimum_id = _Scheduler_Build_id( 0 );
813
814  return id - minimum_id;
815}
816
[11e7893]817/**
818 * @brief Gets the scheduler from the given object build id.
819 *
820 * @param id The object build id.
821 *
822 * @return The scheduler to the object id.
823 */
[2612a0b]824RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
825  Objects_Id id
[1b67535d]826)
827{
[2612a0b]828  uint32_t index;
[1b67535d]829
[2612a0b]830  index = _Scheduler_Get_index_by_id( id );
[1b67535d]831
[2612a0b]832  if ( index >= _Scheduler_Count ) {
833    return NULL;
834  }
835
836  return &_Scheduler_Table[ index ];
[1b67535d]837}
838
/**
 * @brief Gets the index of the scheduler
 *
 * The index is the position of the scheduler in the scheduler table,
 * computed via pointer arithmetic; @a scheduler must point into the table.
 *
 * @param scheduler The scheduler to get the index of.
 *
 * @return The index of the given scheduler.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}
852
[8f0c7a46]853#if defined(RTEMS_SMP)
[5c3d250]854/**
855 * @brief Gets an idle thread from the scheduler instance.
856 *
[11e7893]857 * @param context The scheduler instance context.
[5c3d250]858 *
[11e7893]859 * @return idle An idle thread for use.  This function must always return an
[5c3d250]860 * idle thread.  If none is available, then this is a fatal error.
861 */
862typedef Thread_Control *( *Scheduler_Get_idle_thread )(
863  Scheduler_Context *context
864);
865
866/**
867 * @brief Releases an idle thread to the scheduler instance for reuse.
868 *
[11e7893]869 * @param context The scheduler instance context.
870 * @param idle The idle thread to release.
[5c3d250]871 */
872typedef void ( *Scheduler_Release_idle_thread )(
873  Scheduler_Context *context,
874  Thread_Control    *idle
875);
876
/**
 * @brief Changes the threads state to the given new state.
 *
 * The caller must own the thread scheduler lock, except while the thread is
 * still blocked or before the system is up (during initialization).
 *
 * @param[out] the_thread The thread to change the state of.
 * @param new_state The new state for @a the_thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}
896
/**
 * @brief Sets the scheduler node's idle thread.
 *
 * The node must not already have an idle thread, and its owner must still be
 * its user.  The idle thread becomes the new user of the node.
 *
 * @param[in, out] node The node to receive an idle thread.
 * @param idle The idle thread control for the operation.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}
916
[5c3d250]917/**
[11e7893]918 * @brief Uses an idle thread for this scheduler node.
[5c3d250]919 *
[11e7893]920 * A thread whose home scheduler node has a sticky level greater than zero may
921 * use an idle thread in the home scheduler instance in the case it executes
922 * currently in another scheduler instance or in the case it is in a blocking
[6771359f]923 * state.
[5c3d250]924 *
[11e7893]925 * @param context The scheduler instance context.
926 * @param[in, out] node The node which wants to use the idle thread.
927 * @param cpu The processor for the idle thread.
928 * @param get_idle_thread Function to get an idle thread.
[5c3d250]929 */
930RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
931  Scheduler_Context         *context,
932  Scheduler_Node            *node,
[6771359f]933  Per_CPU_Control           *cpu,
[5c3d250]934  Scheduler_Get_idle_thread  get_idle_thread
935)
936{
937  Thread_Control *idle = ( *get_idle_thread )( context );
938
[be0366b]939  _Scheduler_Set_idle_thread( node, idle );
[6771359f]940  _Thread_Set_CPU( idle, cpu );
[5c3d250]941  return idle;
942}
943
/**
 * @brief Actions resulting from _Scheduler_Try_to_schedule_node().
 */
typedef enum {
  /** The node can be scheduled. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  /** The node can be scheduled if the caller's idle thread is handed over
      to it. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  /** The node cannot be scheduled. */
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;
949
/**
 * @brief Tries to schedule this scheduler node.
 *
 * @param context The scheduler instance context.
 * @param[in, out] node The node which wants to get scheduled.
 * @param idle A potential idle thread used by a potential victim node.
 * @param get_idle_thread Function to get an idle thread.
 *
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE This node can be scheduled.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE This node can be
 *   scheduled once the caller hands over @a idle to it.
 * @retval SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK This node cannot be scheduled.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    /* The owner is ready: schedule it via this node. */
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    /* The owner is already scheduled elsewhere and the node is not sticky
       enough to be kept occupied: block the node. */
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    /* Reuse the caller-provided idle thread instead of obtaining a new
       one. */
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    /* Sticky node without a runnable owner: occupy it with a fresh idle
       thread placed on the owner's processor. */
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}
1004
1005/**
[11e7893]1006 * @brief Releases an idle thread using this scheduler node.
[5c3d250]1007 *
[11e7893]1008 * @param context The scheduler instance context.
1009 * @param[in, out] node The node which may have an idle thread as user.
1010 * @param release_idle_thread Function to release an idle thread.
[5c3d250]1011 *
1012 * @retval idle The idle thread which used this node.
1013 * @retval NULL This node had no idle thread as an user.
1014 */
1015RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
1016  Scheduler_Context             *context,
1017  Scheduler_Node                *node,
1018  Scheduler_Release_idle_thread  release_idle_thread
1019)
1020{
1021  Thread_Control *idle = _Scheduler_Node_get_idle( node );
1022
1023  if ( idle != NULL ) {
1024    Thread_Control *owner = _Scheduler_Node_get_owner( node );
1025
1026    node->idle = NULL;
1027    _Scheduler_Node_set_user( node, owner );
1028    ( *release_idle_thread )( context, idle );
1029  }
1030
1031  return idle;
1032}
1033
[11e7893]1034/**
1035 * @brief Exchanges an idle thread from the scheduler node that uses it
1036 *      right now to another scheduler node.
1037 *
1038 * @param needs_idle The scheduler node that needs an idle thread.
1039 * @param uses_idle The scheduler node that used the idle thread.
1040 * @param idle The idle thread that is exchanged.
1041 */
[be0366b]1042RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
1043  Scheduler_Node *needs_idle,
1044  Scheduler_Node *uses_idle,
1045  Thread_Control *idle
1046)
1047{
1048  uses_idle->idle = NULL;
1049  _Scheduler_Node_set_user(
1050    uses_idle,
1051    _Scheduler_Node_get_owner( uses_idle )
1052  );
1053  _Scheduler_Set_idle_thread( needs_idle, idle );
1054}
1055
/**
 * @brief Blocks this scheduler node.
 *
 * @param context The scheduler instance context.
 * @param[in, out] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in, out] node The node which wants to get blocked.
 * @param is_scheduled This node is scheduled.
 * @param get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  /* The blocking thread no longer contributes to the node's sticky level. */
  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  /* Mark the thread as blocked under its scheduler lock. */
  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    /*
     * The node stays sticky: if it is scheduled and not yet occupied by an
     * idle thread, let an idle thread take over the thread's processor and
     * make it the heir there.
     */
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    /* Do not continue with the blocking operation. */
    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}
[5bd822a7]1113
/**
 * @brief Discard the idle thread from the scheduler node.
 *
 * The idle thread currently using @a node is released back to the scheduler
 * instance, @a the_thread takes over the idle thread's processor, and it
 * becomes the new heir there.
 *
 * @param context The scheduler context.
 * @param[in, out] the_thread The thread for the operation.
 * @param[in, out] node The scheduler node to discard the idle thread from.
 * @param release_idle_thread Method to release the idle thread from the context.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  /* Hand the node back from the idle thread to its owner. */
  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  /* Move the thread onto the idle thread's processor and update the heir. */
  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}
1145
/**
 * @brief Unblocks this scheduler node.
 *
 * @param context The scheduler instance context.
 * @param[in, out] the_thread The thread which wants to get unblocked.
 * @param[in, out] node The node which wants to get unblocked.
 * @param is_scheduled This node is scheduled.
 * @param release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Do not continue with the unblocking operation.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  /* The unblocking thread contributes to the node's sticky level again. */
  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    /*
     * The node is already scheduled, so the thread can directly replace the
     * idle thread which occupied the node; no further unblock processing is
     * necessary.
     */
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
[8f0c7a46]1187#endif
1188
[11e7893]1189/**
1190 * @brief Updates the heir.
1191 *
1192 * @param[in, out] new_heir The new heir.
1193 * @param force_dispatch Indicates whether the dispatch happens also if the
1194 *      currently running thread is set as not preemptible.
1195 */
[d37adfe5]1196RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
1197  Thread_Control *new_heir,
1198  bool            force_dispatch
1199)
1200{
1201  Thread_Control *heir = _Thread_Heir;
1202
1203  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
1204#if defined(RTEMS_SMP)
[baa13626]1205    /*
1206     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
1207     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
1208     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
1209     * schedulers.
1210     */
1211    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
1212    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
[d37adfe5]1213#endif
1214    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
1215    _Thread_Heir = new_heir;
1216    _Thread_Dispatch_necessary = true;
1217  }
1218}
1219
/**
 * @brief Sets a new scheduler.
 *
 * @param new_scheduler The new scheduler to set.
 * @param[in, out] the_thread The thread for the operations.
 * @param priority The initial priority for the thread with the new scheduler.
 *
 * @retval STATUS_SUCCESSFUL The operation succeeded.
 * @retval STATUS_RESOURCE_IN_USE The thread's wait queue is not empty, the
 *   thread owns additional priority nodes, or (SMP) the thread uses more
 *   than one scheduler node or is pinned.
 * @retval STATUS_UNSATISFIED The new scheduler has no processors or rejects
 *   the thread's processor affinity.
 */
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;

#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  /*
   * Temporarily remove the real priority node from the old home scheduler
   * node so that we can check whether the thread has any other priority
   * contributions which forbid a scheduler change.
   */
  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if (
    !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
#if defined(RTEMS_SMP)
      || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
      || the_thread->Scheduler.pin_level != 0
#endif
  ) {
    /* Roll back the extraction and refuse the scheduler change. */
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  old_scheduler = _Thread_Scheduler_get_home( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  /*
   * The new scheduler must own at least one processor and must accept the
   * thread's current processor affinity.
   */
  if (
    _Scheduler_Get_processor_count( new_scheduler ) == 0
      || !( *new_scheduler->Operations.set_affinity )(
        new_scheduler,
        the_thread,
        new_scheduler_node,
        &the_thread->Scheduler.Affinity
      )
  ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    /* Roll back the extraction and refuse the scheduler change. */
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
  the_thread->Scheduler.home_scheduler = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
#else
  /* Uniprocessor configurations have exactly one scheduler node. */
  new_scheduler_node = old_scheduler_node;
#endif

  /* Reset the thread to the initial priority of the new scheduler. */
  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    /* Block the thread in the old scheduler before migrating the nodes. */
    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    /* Replace the old scheduler node by the new one in both node chains. */
    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    /* Unblock the thread in the new scheduler if it was ready before. */
    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}
1356
[1f0d013]1357/** @} */
[0faa9dad]1358
[c6e21ee1]1359#ifdef __cplusplus
1360}
1361#endif
1362
[0faa9dad]1363#endif
1364/* end of include file */
Note: See TracBrowser for help on using the repository browser.