source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ 97f7dac

Last change on this file was 97f7dac, checked in by Sebastian Huber <sebastian.huber@…>, on 10/21/16 at 07:23:48

score: Delete _Scheduler_Ask_for_help_if_necessary

Delete Thread_Control::Resource_node.

Update #2556.

/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014, 2016 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}

RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
  uint32_t cpu_index = _Per_CPU_Get_index( cpu );

  return _Scheduler_Get_by_CPU_index( cpu_index );
}

/**
 * @brief Acquires the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context to use for
 *   _Scheduler_Release_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section (interrupts
 * disabled).
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] lock_context The lock context used for
 *   _Scheduler_Acquire_critical().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.node;
#else
  return the_thread->Scheduler.nodes;
#endif
}

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Remote schedulers may then be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine implements the scheduling decision logic for
 * the scheduler. It does NOT dispatch.
 *
 * @param[in] the_thread The thread whose state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  Thread_Control          *needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( needs_help != the_thread ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * This routine removes @a the_thread from the scheduling decision for
 * the scheduler. The primary task is to remove the thread from the
 * ready queue.  It performs any necessary scheduling operations
 * including the selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This operation must fetch the latest thread priority value for this
 * scheduler instance and update its internal state if necessary.
 *
 * @param[in] the_thread The thread.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;
  Thread_Control          *needs_help;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  needs_help = ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  if ( needs_help != the_thread ) {
    return;
  }

  node = _Chain_Next( node );

  while ( node != tail ) {
    bool success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.unblock )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * On uniprocessor configurations, this operation must evaluate the thread
 * state.  In case the thread is not ready, then the priority update should be
 * deferred to the next scheduler unblock operation.
 *
 * The operation must update the heir and thread dispatch necessary variables
 * in case the set of scheduled threads changes.
 *
 * @param[in] the_thread The thread changing its priority.
 *
 * @see _Scheduler_Node_get_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Scheduler_Get( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates a
 * priority change of a thread to the scheduler.
 *
 * @param[in] the_thread The thread changing its priority or sticky level.
 *
 * @see _Scheduler_Update_priority().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  /*
   * Apply the sticky level change to the home scheduler node before the
   * priority update; otherwise the sticky_level_change parameter would be
   * unused and the sticky level would never change as documented above.
   */
  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler domain.
 *
 * Let M be the maximum scheduler priority.  The mapping must be bijective in
 * the closed interval [0, M], i.e. _Scheduler_Unmap_priority( scheduler,
 * _Scheduler_Map_priority( scheduler, p ) ) == p for all p in [0, M].  For
 * other values the mapping is undefined.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The user domain thread priority.
 *
 * @return The corresponding thread priority of the scheduler domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user domain.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] priority The scheduler domain thread priority.
 *
 * @return The corresponding thread priority of the user domain is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}

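/*
 * A minimal usage sketch (an illustration added here, not part of the
 * original header): the bijectivity requirement above means that mapping a
 * user domain priority into the scheduler domain and back must yield the
 * original value.  The scheduler variable is assumed to be any valid
 * scheduler instance, e.g. obtained via _Scheduler_Get().
 *
 *   Priority_Control user_priority = 10;
 *   Priority_Control scheduler_priority =
 *     _Scheduler_Map_priority( scheduler, user_priority );
 *
 *   _Assert(
 *     _Scheduler_Unmap_priority( scheduler, scheduler_priority )
 *       == user_priority
 *   );
 */
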
/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to initialize.
 * @param[in] the_thread The thread of the scheduler node to initialize.
 * @param[in] priority The thread priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that _Scheduler_Node_destroy() will be called only
 * after a corresponding _Scheduler_Node_initialize().
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] node The scheduler node to destroy.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] deadline The deadline in watchdog ticks since boot.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] priority_node The priority node of the job.
 * @param[in] queue_context The thread queue context to provide the set of
 *   threads for _Thread_Priority_update().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

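/*
 * A sketch of the intended calling convention for the job release and cancel
 * operations (an illustration added here, assuming a caller along the lines
 * of the rate monotonic period support): the scheduler operation collects
 * the affected threads in the thread queue context, and the caller then
 * carries out the priority updates via _Thread_Priority_update().  The
 * deadline and job_priority_node names are hypothetical.
 *
 *   Thread_queue_Context queue_context;
 *
 *   _Scheduler_Release_job(
 *     the_thread,
 *     &job_priority_node,
 *     deadline,
 *     &queue_context
 *   );
 *   _Thread_Priority_update( &queue_context );
 */
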
/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers
 * which support standard RTEMS features, this includes time-slicing
 * management.
 *
 * @param[in] cpu The processor which was interrupted by the clock tick.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

#if defined(RTEMS_SMP)
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return (assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY) != 0;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */

RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)

RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Scheduler_Control *scheduler_of_cpu =
      _Scheduler_Get_by_CPU_index( cpu_index );

    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
          && scheduler != scheduler_of_cpu ) );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */

RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             Scheduler_Node *
                        ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool
                        )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

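/*
 * A sketch of how a scheduler implementation may wire its block operation
 * through _Scheduler_Generic_block() (an illustration added here).  The
 * callback names follow the pattern of the Deterministic Priority scheduler;
 * the exact helper names depend on the implementation.
 *
 *   void _Scheduler_priority_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *the_thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     _Scheduler_Generic_block(
 *       scheduler,
 *       the_thread,
 *       node,
 *       _Scheduler_priority_Extract_body,
 *       _Scheduler_priority_Schedule_body
 *     );
 *   }
 */
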
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}

RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}

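/*
 * A minimal sketch of the identifier round trip (an illustration added
 * here): building a scheduler identifier from a table index and mapping it
 * back is the identity, and a successful lookup yields the corresponding
 * table entry.
 *
 *   Objects_Id               id = _Scheduler_Build_id( 0 );
 *   const Scheduler_Control *scheduler;
 *
 *   _Assert( _Scheduler_Get_index_by_id( id ) == 0 );
 *
 *   if ( _Scheduler_Get_by_id( id, &scheduler ) ) {
 *     _Assert( scheduler == &_Scheduler_Table[ 0 ] );
 *   }
 */
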
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
{
  const Scheduler_Control *scheduler;
  bool ok = _Scheduler_Get_by_id( id, &scheduler );

  (void) scheduler;

  return ok;
}

RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  Scheduler_Node *scheduler_node;

  scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Scheduler_Node_set_priority( scheduler_node, new_priority, prepend_it );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 * idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  the_thread->Scheduler.node = node;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}

extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];

RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert(
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
  );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Use an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
 * helping state may use an idle thread for the scheduler node owned by itself
 * in case it executes currently using another scheduler node or in case it is
 * in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );

  return idle;
}

typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Try to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] idle A potential idle thread used by a potential victim node.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The try to schedule action which indicates how to continue with
 *   this node.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Thread_Control            *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;
  Thread_Control                   *user;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  user = _Scheduler_Node_get_user( node );

  _Thread_Scheduler_acquire_critical( user, &lock_context );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }

    _Thread_Scheduler_release_critical( user, &lock_context );
    return action;
  }

  owner = _Scheduler_Node_get_owner( node );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
      if ( idle != NULL ) {
        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
      } else {
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }
    } else {
      _Scheduler_Node_set_user( node, owner );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else if ( idle != NULL ) {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    } else {
      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else {
    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    } else {
      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    }
  }

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
  }

  _Thread_Scheduler_release_critical( user, &lock_context );
  return action;
}

/**
 * @brief Release an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Block this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] thread The thread which wants to get blocked referencing this
 *   node.  This is not necessarily the user of this node in case the node
 *   participates in the scheduler helping protocol.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval thread_cpu The processor of the thread.  Indicates to continue with
 *   the blocking operation.
 * @retval NULL Otherwise.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context  lock_context;
  Thread_Control   *old_user;
  Thread_Control   *new_user;
  Per_CPU_Control  *thread_cpu;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    _Assert( thread == _Scheduler_Node_get_user( node ) );

    return thread_cpu;
  }

  new_user = NULL;

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    if ( is_scheduled ) {
      _Assert( thread == _Scheduler_Node_get_user( node ) );
      old_user = thread;
      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    }
  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    if ( is_scheduled ) {
      old_user = _Scheduler_Node_get_user( node );

      if ( thread == old_user ) {
        Thread_Control *owner = _Scheduler_Node_get_owner( node );

        if (
          thread != owner
            && owner->Scheduler.state == THREAD_SCHEDULER_READY
        ) {
          new_user = owner;
          _Scheduler_Node_set_user( node, new_user );
        } else {
          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
        }
      }
    }
  } else {
    /* Not implemented, this is part of the OMIP support path. */
    _Assert(0);
  }

  if ( new_user != NULL ) {
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
  }

  return NULL;
}

/**
 * @brief Unblock this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  if ( is_scheduled ) {
    Thread_Control *old_user = _Scheduler_Node_get_user( node );
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );
    Thread_Control *owner = _Scheduler_Node_get_owner( node );
    Thread_Control *new_user;

    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
      _Assert( idle != NULL );
      new_user = the_thread;
    } else if ( idle != NULL ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      new_user = the_thread;
    } else if ( the_thread != owner ) {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
      new_user = the_thread;
      _Scheduler_Node_set_user( node, new_user );
    } else {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( old_user != the_thread );
      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
      new_user = NULL;
    }

    if ( new_user != NULL ) {
      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Set_CPU( new_user, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
    }

    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );

    unblock = true;
  }

  return unblock;
}

/**
 * @brief Asks a ready scheduler node for help.
 *
 * @param[in] node The ready node offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
  Scheduler_Node *node,
  Thread_Control *needs_help
)
{
  _Scheduler_Node_set_user( node, needs_help );

  return needs_help;
}

/**
 * @brief Asks a scheduled scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 * @param[in] previous_accepts_help The previous thread accepting help by this
 *   scheduler node.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval needs_help The previous thread accepting help by this scheduler node
 *   which was displaced by the thread needing help.
 * @retval NULL There are no more threads needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Thread_Control                *offers_help,
  Thread_Control                *needs_help,
  Thread_Control                *previous_accepts_help,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *next_needs_help = NULL;
  Thread_Control *old_user = NULL;
  Thread_Control *new_user = NULL;

  if (
    previous_accepts_help != needs_help
      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
  ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
      old_user = previous_accepts_help;
    }

    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
      new_user = needs_help;
    } else {
      _Assert(
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      );
      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );

      new_user = offers_help;
    }

    if ( previous_accepts_help != offers_help ) {
      next_needs_help = previous_accepts_help;
    }
  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      old_user = _Scheduler_Node_get_user( node );
    }

    new_user = needs_help;
  } else {
    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
  }

  if ( new_user != old_user ) {
    Per_CPU_Control *cpu_self = _Per_CPU_Get();
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_scheduler_and_node(
      old_user,
      _Thread_Scheduler_get_own_node( old_user ),
      old_user
    );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );

    _Scheduler_Node_set_user( node, new_user );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
  }

  return next_needs_help;
}

/**
 * @brief Asks a blocked scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The blocked node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval true Enqueue this scheduler node.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *offers_help,
  Thread_Control    *needs_help
)
{
  bool enqueue;

  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Scheduler_Node_set_user( node, needs_help );
    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );

    enqueue = true;
  } else {
    enqueue = false;
  }

  return enqueue;
}
#endif

RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used().  Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

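/*
 * A sketch of a typical caller (an illustration added here, modeled on the
 * Simple scheduler): after the ready chain changed, the first ready thread
 * becomes the heir, and a dispatch is forced only if requested by the
 * caller.  The context type and helper are assumed to follow the Simple
 * scheduler implementation.
 *
 *   RTEMS_INLINE_ROUTINE void _Scheduler_simple_Schedule_body(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *the_thread,
 *     bool                     force_dispatch
 *   )
 *   {
 *     Scheduler_simple_Context *context =
 *       _Scheduler_simple_Get_context( scheduler );
 *
 *     (void) the_thread;
 *
 *     _Scheduler_Update_heir(
 *       (Thread_Control *) _Chain_First( &context->Ready ),
 *       force_dispatch
 *     );
 *   }
 */
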
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node *new_scheduler_node;
  Scheduler_Node *old_scheduler_node;

  if (
    _Thread_Owns_resources( the_thread )
      || the_thread->Wait.queue != NULL
  ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_own_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if ( !_Priority_Is_empty( &old_scheduler_node->Wait.Priority ) ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  _Thread_Scheduler_process_requests( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Wait_nodes,
    &new_scheduler_node->Thread.Wait_node
  );
  _Chain_Extract_unprotected(
    &old_scheduler_node->Thread.Scheduler_node.Chain
  );
  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
  _Chain_Initialize_one(
    &the_thread->Scheduler.Scheduler_nodes,
    &new_scheduler_node->Thread.Scheduler_node.Chain
  );

  {
    const Scheduler_Control *old_scheduler;

    old_scheduler = _Scheduler_Get( the_thread );

    if ( old_scheduler != new_scheduler ) {
      States_Control current_state;

      current_state = the_thread->current_state;

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Block( the_thread );
      }

      the_thread->Scheduler.own_control = new_scheduler;
      the_thread->Scheduler.control = new_scheduler;
      the_thread->Scheduler.own_node = new_scheduler_node;
      the_thread->Scheduler.node = new_scheduler_node;
      _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

      if ( _States_Is_ready( current_state ) ) {
        _Scheduler_Unblock( the_thread );
      }

      return STATUS_SUCCESSFUL;
    }
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */