source: rtems/cpukit/score/include/rtems/score/schedulerimpl.h @ c5831a3f

4.115
Last change on this file since c5831a3f was c5831a3f, checked in by Sebastian Huber <sebastian.huber@…>, on 04/09/14 at 13:07:54

score: Add clustered/partitioned scheduling

Clustered/partitioned scheduling helps to control the worst-case
latencies in the system. The goal is to reduce the amount of shared
state in the system and thus prevent lock contention. Modern
multi-processor systems tend to have several layers of data and
instruction caches. With clustered/partitioned scheduling it is
possible to honour the cache topology of a system and thus avoid
expensive cache synchronization traffic.

We have clustered scheduling in case the set of processors of a system
is partitioned into non-empty pairwise-disjoint subsets. These subsets
are called clusters. Clusters with a cardinality of one are partitions.
Each cluster is owned by exactly one scheduler instance.

  • Property mode set to 100644
File size: 15.1 KB
RevLine 
[1f0d013]1/**
2 * @file
3 *
4 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
[0faa9dad]5 *
[1f0d013]6 * This inline file contains all of the inlined routines associated with
7 * the manipulation of the scheduler.
[0faa9dad]8 */
9
10/*
11 *  Copyright (C) 2010 Gedare Bloom.
[010192d]12 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
[0faa9dad]13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
[c499856]16 *  http://www.rtems.org/license/LICENSE.
[0faa9dad]17 */
18
[c6e21ee1]19#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
20#define _RTEMS_SCORE_SCHEDULERIMPL_H
21
22#include <rtems/score/scheduler.h>
[0712d17]23#include <rtems/score/cpusetimpl.h>
[c5831a3f]24#include <rtems/score/smpimpl.h>
[e5ca54c9]25#include <rtems/score/threadimpl.h>
[0faa9dad]26
[c6e21ee1]27#ifdef __cplusplus
28extern "C" {
29#endif
[0faa9dad]30
31/**
[1f0d013]32 * @addtogroup ScoreScheduler
[0faa9dad]33 */
[b697bc6]34/**@{**/
[0faa9dad]35
[c6e21ee1]36/**
37 *  @brief Initializes the scheduler to the policy chosen by the user.
38 *
39 *  This routine initializes the scheduler to the policy chosen by the user
40 *  through confdefs, or to the priority scheduler with ready chains by
41 *  default.
42 */
43void _Scheduler_Handler_initialization( void );
44
[c5831a3f]45RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
46  uint32_t cpu_index
47)
48{
49#if defined(RTEMS_SMP)
50  return _Scheduler_Assignments[ cpu_index ].scheduler;
51#else
52  (void) cpu_index;
53
54  return &_Scheduler_Table[ 0 ];
55#endif
56}
57
58RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
59  const Per_CPU_Control *cpu
60)
61{
62  uint32_t cpu_index = _Per_CPU_Get_index( cpu );
63
64  return _Scheduler_Get_by_CPU_index( cpu_index );
65}
66
[0faa9dad]67/**
[1f0d013]68 * The preferred method to add a new scheduler is to define the jump table
69 * entries and add a case to the _Scheduler_Initialize routine.
[0faa9dad]70 *
[1f0d013]71 * Generic scheduling implementations that rely on the ready queue only can
[0faa9dad]72 * be found in the _Scheduler_queue_XXX functions.
73 */
74
[1f0d013]75/*
76 * Passing the Scheduler_Control* to these functions allows for multiple
77 * scheduler's to exist simultaneously, which could be useful on an SMP
78 * system.  Then remote Schedulers may be accessible.  How to protect such
[0faa9dad]79 * accesses remains an open problem.
80 */
81
[1f0d013]82/**
83 * @brief Scheduler schedule.
[0faa9dad]84 *
[1f0d013]85 * This kernel routine implements the scheduling decision logic for
86 * the scheduler. It does NOT dispatch.
[e5ca54c9]87 *
[24934e36]88 * @param[in] the_thread The thread which state changed previously.
[0faa9dad]89 */
[24934e36]90RTEMS_INLINE_ROUTINE void _Scheduler_Schedule(
[e1598a6]91  const Scheduler_Control *scheduler,
92  Thread_Control          *the_thread
[24934e36]93)
[0faa9dad]94{
[24934e36]95  ( *scheduler->Operations.schedule )( scheduler, the_thread );
[0faa9dad]96}
97
[1f0d013]98/**
[6eba7c85]99 * @brief Scheduler yield with a particular thread.
[0faa9dad]100 *
[6eba7c85]101 * This routine is invoked when a thread wishes to voluntarily transfer control
102 * of the processor to another thread.
103 *
[24934e36]104 * @param[in] the_thread The yielding thread.
[0faa9dad]105 */
[6eba7c85]106RTEMS_INLINE_ROUTINE void _Scheduler_Yield(
[e1598a6]107  const Scheduler_Control *scheduler,
108  Thread_Control          *the_thread
[6eba7c85]109)
[0faa9dad]110{
[24934e36]111  ( *scheduler->Operations.yield )( scheduler, the_thread );
[0faa9dad]112}
113
[1f0d013]114/**
115 * @brief Scheduler block.
[0faa9dad]116 *
[1f0d013]117 * This routine removes @a the_thread from the scheduling decision for
118 * the scheduler. The primary task is to remove the thread from the
119 * ready queue.  It performs any necessary schedulering operations
120 * including the selection of a new heir thread.
[0faa9dad]121 */
[1f0d013]122RTEMS_INLINE_ROUTINE void _Scheduler_Block(
[e1598a6]123  const Scheduler_Control *scheduler,
124  Thread_Control               *the_thread
[0faa9dad]125)
126{
[24934e36]127  ( *scheduler->Operations.block )( scheduler, the_thread );
[0faa9dad]128}
129
[1f0d013]130/**
131 * @brief Scheduler unblock.
[0faa9dad]132 *
[1f0d013]133 * This routine adds @a the_thread to the scheduling decision for
134 * the scheduler.  The primary task is to add the thread to the
135 * ready queue per the schedulering policy and update any appropriate
136 * scheduling variables, for example the heir thread.
[0faa9dad]137 */
138RTEMS_INLINE_ROUTINE void _Scheduler_Unblock(
[e1598a6]139  const Scheduler_Control *scheduler,
140  Thread_Control          *the_thread
[0faa9dad]141)
142{
[24934e36]143  ( *scheduler->Operations.unblock )( scheduler, the_thread );
[0faa9dad]144}
145
[1f0d013]146/**
147 * @brief Scheduler allocate.
[0faa9dad]148 *
149 * This routine allocates @a the_thread->scheduler
150 */
[69aa3349]151RTEMS_INLINE_ROUTINE bool _Scheduler_Allocate(
[e1598a6]152  const Scheduler_Control *scheduler,
153  Thread_Control          *the_thread
[0faa9dad]154)
155{
[24934e36]156  return ( *scheduler->Operations.allocate )( scheduler, the_thread );
[0faa9dad]157}
158
[1f0d013]159/**
160 * @brief Scheduler free.
[0faa9dad]161 *
162 * This routine frees @a the_thread->scheduler
163 */
[1f0d013]164RTEMS_INLINE_ROUTINE void _Scheduler_Free(
[e1598a6]165  const Scheduler_Control *scheduler,
166  Thread_Control          *the_thread
[0faa9dad]167)
168{
[24934e36]169  ( *scheduler->Operations.free )( scheduler, the_thread );
[0faa9dad]170}
171
[1f0d013]172/**
173 * @brief Scheduler update.
[0faa9dad]174 *
175 * This routine updates @a the_thread->scheduler
176 */
[1f0d013]177RTEMS_INLINE_ROUTINE void _Scheduler_Update(
[e1598a6]178  const Scheduler_Control *scheduler,
179  Thread_Control          *the_thread
[0faa9dad]180)
181{
[24934e36]182  ( *scheduler->Operations.update )( scheduler, the_thread );
[108c4b0]183}
184
[1f0d013]185/**
186 * @brief Scheduler enqueue.
[108c4b0]187 *
188 * This routine enqueue @a the_thread->scheduler
189 */
[1f0d013]190RTEMS_INLINE_ROUTINE void _Scheduler_Enqueue(
[e1598a6]191  const Scheduler_Control *scheduler,
192  Thread_Control          *the_thread
[108c4b0]193)
194{
[24934e36]195  ( *scheduler->Operations.enqueue )( scheduler, the_thread );
[108c4b0]196}
197
[1f0d013]198/**
199 * @brief Scheduler enqueue first.
[108c4b0]200 *
201 * This routine enqueue_first @a the_thread->scheduler
202 */
[1f0d013]203RTEMS_INLINE_ROUTINE void _Scheduler_Enqueue_first(
[e1598a6]204  const Scheduler_Control *scheduler,
205  Thread_Control          *the_thread
[108c4b0]206)
207{
[24934e36]208  ( *scheduler->Operations.enqueue_first )( scheduler, the_thread );
[108c4b0]209}
210
[1f0d013]211/**
212 * @brief Scheduler extract.
[108c4b0]213 *
214 * This routine extract @a the_thread->scheduler
215 */
[1f0d013]216RTEMS_INLINE_ROUTINE void _Scheduler_Extract(
[e1598a6]217  const Scheduler_Control *scheduler,
218  Thread_Control          *the_thread
[108c4b0]219)
220{
[24934e36]221  ( *scheduler->Operations.extract )( scheduler, the_thread );
[0faa9dad]222}
223
[ac9d2ecc]224/**
[1f0d013]225 * @brief Scheduler priority compare.
[ac9d2ecc]226 *
227 * This routine compares two priorities.
228 */
229RTEMS_INLINE_ROUTINE int _Scheduler_Priority_compare(
[e1598a6]230  const Scheduler_Control *scheduler,
231  Priority_Control         p1,
232  Priority_Control         p2
[ac9d2ecc]233)
234{
[24934e36]235  return ( *scheduler->Operations.priority_compare )( p1, p2 );
[ac9d2ecc]236}
237
238/**
[1f0d013]239 * @brief Scheduler release job.
[ac9d2ecc]240 *
241 * This routine is called when a new period of task is issued.
242 */
243RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
[e1598a6]244  const Scheduler_Control *scheduler,
245  Thread_Control          *the_thread,
246  uint32_t                 length
[ac9d2ecc]247)
248{
[24934e36]249  ( *scheduler->Operations.release_job )( scheduler, the_thread, length );
[ac9d2ecc]250}
251
[1f0d013]252/**
253 * @brief Scheduler method invoked at each clock tick.
[3203e09]254 *
255 * This method is invoked at each clock tick to allow the scheduler
[1f0d013]256 * implementation to perform any activities required.  For the
[3203e09]257 * scheduler which support standard RTEMS features, this includes
258 * time-slicing management.
259 */
[c5831a3f]260RTEMS_INLINE_ROUTINE void _Scheduler_Tick( void )
[3203e09]261{
[c5831a3f]262  uint32_t cpu_count = _SMP_Get_processor_count();
263  uint32_t cpu_index;
264
265  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
266    const Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
267    const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
268
269    if ( scheduler != NULL ) {
270      ( *scheduler->Operations.tick )( scheduler, cpu->executing );
271    }
272  }
[3203e09]273}
274
[1ccb64e1]275/**
276 * @brief Starts the idle thread for a particular processor.
277 *
[24934e36]278 * @param[in,out] the_thread The idle thread for the processor.
[961669d]279 * @parma[in,out] processor The processor for the idle thread.
[1ccb64e1]280 *
281 * @see _Thread_Create_idle().
282 */
283RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
[e1598a6]284  const Scheduler_Control *scheduler,
285  Thread_Control          *the_thread,
286  Per_CPU_Control         *cpu
[1ccb64e1]287)
288{
[24934e36]289  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
[1ccb64e1]290}
291
#if defined(RTEMS_SMP)
/**
 * @brief Returns the scheduler assignment of the processor index.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

/**
 * @brief Returns true if the processor of this assignment is mandatory for
 * the scheduler instance, and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return ( assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY )
    != 0;
}

/**
 * @brief Returns true if a scheduler instance is assigned to this processor,
 * and false otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return assignment->scheduler != NULL;
}
#endif /* defined(RTEMS_SMP) */
314
315RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
316  const Scheduler_Control *scheduler,
317  uint32_t cpu_index
318)
319{
320#if defined(RTEMS_SMP)
321  const Scheduler_Assignment *assignment =
322    _Scheduler_Get_assignment( cpu_index );
323
324  return assignment->scheduler == scheduler;
325#else
326  (void) scheduler;
327  (void) cpu_index;
328
329  return true;
330#endif
331}
332
[0712d17]333#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
334
335RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
336  const Scheduler_Control *scheduler,
337  size_t                   cpusetsize,
338  cpu_set_t               *cpuset
339)
340{
341  uint32_t cpu_count = _SMP_Get_processor_count();
342  uint32_t cpu_index;
343
344  CPU_ZERO_S( cpusetsize, cpuset );
345
346  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
[c5831a3f]347#if defined(RTEMS_SMP)
348    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
349      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
350    }
351#else
352    (void) scheduler;
353
[0712d17]354    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
[c5831a3f]355#endif
[0712d17]356  }
357}
358
359RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
360  const Scheduler_Control *scheduler,
361  Thread_Control          *the_thread,
362  size_t                   cpusetsize,
363  cpu_set_t               *cpuset
364)
365{
366  (void) the_thread;
367
368  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );
369
370  return true;
371}
372
373bool _Scheduler_Get_affinity(
374  const Scheduler_Control *scheduler,
375  Thread_Control          *the_thread,
376  size_t                   cpusetsize,
377  cpu_set_t               *cpuset
378);
379
[c5831a3f]380RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
381  Thread_Control *the_thread
382)
383{
384#if defined(RTEMS_SMP)
385  return the_thread->scheduler;
386#else
387  (void) the_thread;
388
389  return &_Scheduler_Table[ 0 ];
390#endif
391}
392
393RTEMS_INLINE_ROUTINE bool _Scheduler_Set(
394  const Scheduler_Control *scheduler,
395  Thread_Control          *the_thread
396)
397{
398  bool ok;
399
400  if ( _States_Is_dormant( the_thread->current_state ) ) {
401#if defined(RTEMS_SMP)
402    _Scheduler_Free( _Scheduler_Get( the_thread ), the_thread );
403    the_thread->scheduler = scheduler;
404    _Scheduler_Allocate( scheduler, the_thread );
405    _Scheduler_Update( scheduler, the_thread );
406#else
407    (void) scheduler;
408#endif
409
410    ok = true;
411  } else {
412    ok = false;
413  }
414
415  return ok;
416}
417
[0712d17]418RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
419  const Scheduler_Control *scheduler,
420  Thread_Control          *the_thread,
421  size_t                   cpusetsize,
422  const cpu_set_t         *cpuset
423)
424{
425  size_t   cpu_max   = _CPU_set_Maximum_CPU_count( cpusetsize );
426  uint32_t cpu_count = _SMP_Get_processor_count();
427  uint32_t cpu_index;
428  bool     ok = true;
429
430  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
[c5831a3f]431#if defined(RTEMS_SMP)
432    const Scheduler_Control *scheduler_of_cpu =
433      _Scheduler_Get_by_CPU_index( cpu_index );
434
435    ok = ok
436      && ( ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
437          && scheduler == scheduler_of_cpu )
438        || ( !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
439          && scheduler != scheduler_of_cpu ) );
440#else
441    (void) scheduler;
442
[0712d17]443    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
[c5831a3f]444#endif
[cfe457f]445  }
446
[0712d17]447  for ( ; cpu_index < cpu_max ; ++cpu_index ) {
448    ok = ok && !CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
[cfe457f]449  }
[0712d17]450
[c5831a3f]451  if ( ok ) {
452    ok = _Scheduler_Set( scheduler, the_thread );
453  }
454
[0712d17]455  return ok;
456}
457
458bool _Scheduler_Set_affinity(
459  Thread_Control          *the_thread,
460  size_t                   cpusetsize,
461  const cpu_set_t         *cpuset
462);
463
464#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
[cfe457f]465
[e5ca54c9]466RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
467  Thread_Control *heir,
468  bool force_dispatch
469)
470{
471  Thread_Control *executing = _Thread_Executing;
472
473  _Thread_Heir = heir;
474
475  if ( executing != heir && ( force_dispatch || executing->is_preemptible ) )
476    _Thread_Dispatch_necessary = true;
477}
478
479RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
[e1598a6]480  const Scheduler_Control *scheduler,
481  Thread_Control          *the_thread,
482  void                  ( *extract )(
483                             const Scheduler_Control *,
484                             Thread_Control * ),
485  void                  ( *schedule )(
486                             const Scheduler_Control *,
487                             Thread_Control *,
488                             bool )
[e5ca54c9]489)
490{
[24934e36]491  ( *extract )( scheduler, the_thread );
[e5ca54c9]492
493  /* TODO: flash critical section? */
494
[24934e36]495  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
496    ( *schedule )( scheduler, the_thread, true );
[e5ca54c9]497  }
498}
499
[c6e21ee1]500/**
[298d0fd]501 * @brief Returns true if @p1 encodes a lower priority than @a p2 in the
502 * intuitive sense of priority.
[c6e21ee1]503 */
[298d0fd]504RTEMS_INLINE_ROUTINE bool _Scheduler_Is_priority_lower_than(
[e1598a6]505  const Scheduler_Control *scheduler,
506  Priority_Control         p1,
507  Priority_Control         p2
[298d0fd]508)
509{
[24934e36]510  return _Scheduler_Priority_compare( scheduler, p1,  p2 ) < 0;
[298d0fd]511}
[c6e21ee1]512
513/**
[298d0fd]514 * @brief Returns true if @p1 encodes a higher priority than @a p2 in the
515 * intuitive sense of priority.
516 */
517RTEMS_INLINE_ROUTINE bool _Scheduler_Is_priority_higher_than(
[e1598a6]518  const Scheduler_Control *scheduler,
519  Priority_Control         p1,
520  Priority_Control         p2
[298d0fd]521)
522{
[24934e36]523  return _Scheduler_Priority_compare( scheduler, p1,  p2 ) > 0;
[298d0fd]524}
525
526/**
527 * @brief Returns the priority encoding @a p1 or @a p2 with the higher priority
[c6e21ee1]528 * in the intuitive sense of priority.
529 */
[298d0fd]530RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Highest_priority_of_two(
[e1598a6]531  const Scheduler_Control *scheduler,
532  Priority_Control         p1,
533  Priority_Control         p2
[298d0fd]534)
535{
[24934e36]536  return _Scheduler_Is_priority_higher_than( scheduler, p1, p2 ) ? p1 : p2;
[298d0fd]537}
[c6e21ee1]538
[c2225d7]539/**
540 * @brief Sets the thread priority to @a priority if it is higher than the
541 * current priority of the thread in the intuitive sense of priority.
542 */
543RTEMS_INLINE_ROUTINE void _Scheduler_Set_priority_if_higher(
[e1598a6]544  const Scheduler_Control *scheduler,
545  Thread_Control          *the_thread,
546  Priority_Control         priority
[c2225d7]547)
548{
549  Priority_Control current = the_thread->current_priority;
550
[24934e36]551  if ( _Scheduler_Is_priority_higher_than( scheduler, priority, current ) ) {
[c2225d7]552    _Thread_Set_priority( the_thread, priority );
553  }
554}
555
556/**
557 * @brief Changes the thread priority to @a priority if it is higher than the
558 * current priority of the thread in the intuitive sense of priority.
559 */
560RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority_if_higher(
[e1598a6]561  const Scheduler_Control *scheduler,
562  Thread_Control          *the_thread,
563  Priority_Control         priority,
564  bool                     prepend_it
[c2225d7]565)
566{
567  Priority_Control current = the_thread->current_priority;
568
[24934e36]569  if ( _Scheduler_Is_priority_higher_than( scheduler, priority, current ) ) {
[c2225d7]570    _Thread_Change_priority( the_thread, priority, prepend_it );
571  }
572}
573
[b427a92]574RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
575{
576  return _Objects_Build_id(
577    OBJECTS_FAKE_OBJECTS_API,
578    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
579    _Objects_Local_node,
580    scheduler_index + 1
581  );
582}
583
[1b67535d]584RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
585  Objects_Id                id,
586  const Scheduler_Control **scheduler
587)
588{
589  uint32_t minimum_id = _Scheduler_Build_id( 0 );
590  uint32_t index = id - minimum_id;
591
592  *scheduler = &_Scheduler_Table[ index ];
593
594  return index < _Scheduler_Count;
595}
596
[27270b0d]597RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
598  const Scheduler_Control *scheduler
599)
600{
601  return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
602}
603
[1f0d013]604/** @} */
[0faa9dad]605
[c6e21ee1]606#ifdef __cplusplus
607}
608#endif
609
[0faa9dad]610#endif
611/* end of include file */
Note: See TracBrowser for help on using the repository browser.