Changeset 238629f in rtems


Ignore:
Timestamp:
May 19, 2014, 8:26:55 PM (5 years ago)
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.11, master
Children:
897a0935
Parents:
647859e
git-author:
Joel Sherrill <joel.sherrill@…> (05/19/14 20:26:55)
git-committer:
Jennifer Averett <jennifer.averett@…> (06/11/14 14:27:06)
Message:

Add SMP Priority Scheduler with Affinity

This scheduler attempts to account for needed thread migrations caused
as a side-effect of a thread state, affinity, or priority change operation.

This scheduler has its own allocate_processor handler named
_Scheduler_SMP_Allocate_processor_exact() because
_Scheduler_SMP_Allocate_processor() attempts to prevent an executing
thread from moving off its current CPU without considering affinity.
Without this, the scheduler makes all the right decisions and then
they are discarded at the end.

==Side Effects of Adding This Scheduler==

Added Thread_Control * parameter to Scheduler_SMP_Get_highest_ready type
so methods looking for the highest ready thread can filter by the processor
on which the blocking thread resides. This allows affinity to be considered.
Simple Priority SMP and Priority SMP ignore this parameter.

+ Added get_lowest_scheduled argument to _Scheduler_SMP_Enqueue_ordered().

+ Added allocate_processor argument to the following methods:

  • _Scheduler_SMP_Block()
  • _Scheduler_SMP_Enqueue_ordered()
  • _Scheduler_SMP_Enqueue_scheduled_ordered()

+ schedulerprioritysmpimpl.h is a new file with prototypes for methods

which were formerly static in schedulerprioritysmp.c but now need to
be public to be shared with this scheduler.

NOTE:

_Scheduler_SMP_Get_lowest_ready() appears to have a path which would
allow it to return NULL. Previously, _Scheduler_SMP_Enqueue_ordered()
would have asserted on it. If it cannot return NULL,
_Scheduler_SMP_Get_lowest_ready() should have an assertion.

Location:
cpukit/score
Files:
1 added
7 edited

Legend:

Unmodified
Added
Removed
  • cpukit/score/Makefile.am

    r647859e r238629f  
    115115include_rtems_score_HEADERS += include/rtems/score/atomic.h
    116116include_rtems_score_HEADERS += include/rtems/score/cpustdatomic.h
     117include_rtems_score_HEADERS += include/rtems/score/schedulerprioritysmpimpl.h
    117118include_rtems_score_HEADERS += include/rtems/score/schedulerpriorityaffinitysmp.h
    118119include_rtems_score_HEADERS += include/rtems/score/schedulersimplesmp.h
  • cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h

    r647859e r238629f  
    5353    _Scheduler_default_Schedule, \
    5454    _Scheduler_priority_SMP_Yield, \
    55     _Scheduler_priority_SMP_Block, \
    56     _Scheduler_priority_SMP_Unblock, \
    57     _Scheduler_priority_SMP_Change_priority, \
     55    _Scheduler_priority_affinity_SMP_Block, \
     56    _Scheduler_priority_affinity_SMP_Unblock, \
     57    _Scheduler_priority_affinity_SMP_Change_priority, \
    5858    _Scheduler_priority_affinity_SMP_Node_initialize, \
    5959    _Scheduler_default_Node_destroy, \
     
    6868
    6969/**
    70  *  @brief Allocates @a the_thread->scheduler.
     70 *  @brief Initializes per thread scheduler information
    7171 *
    72  *  This routine allocates @a the_thread->scheduler.
     72 *  This routine allocates @a thread->scheduler.
    7373 *
    7474 *  @param[in] scheduler points to the scheduler specific information.
    75  *  @param[in] the_thread is the thread the scheduler is allocating
     75 *  @param[in] thread is the thread the scheduler is allocating
    7676 *             management memory for.
    7777 */
    7878void _Scheduler_priority_affinity_SMP_Node_initialize(
    7979  const Scheduler_Control *scheduler,
    80   Thread_Control          *the_thread
     80  Thread_Control          *thread
    8181);
    8282
    8383/**
    84  * @brief Get affinity for the priority affinity smp scheduler.
     84 * @brief SMP Priority Affinity Scheduler Block Operation
     85 *
     86 * This method is the block operation for this scheduler.
     87 *
     88 * @param[in] scheduler is the scheduler instance information
     89 * @param[in] thread is the thread to block
     90 */
     91void _Scheduler_priority_affinity_SMP_Block(
     92  const Scheduler_Control *scheduler,
     93  Thread_Control          *thread
     94);
     95
     96/**
     97 * @brief SMP Priority Affinity Scheduler Unblock Operation
     98 *
     99 * This method is the unblock operation for this scheduler.
     100 *
     101 * @param[in] scheduler is the scheduler instance information
     102 * @param[in] thread is the thread to unblock
     103 */
     104void _Scheduler_priority_affinity_SMP_Unblock(
     105  const Scheduler_Control *scheduler,
     106  Thread_Control          *thread
     107);
     108
     109/**
     110 * @brief Get affinity for the priority affinity SMP scheduler.
    85111 *
    86112 * @param[in] scheduler The scheduler of the thread.
     
    99125);
    100126
     127/**
     128 * @brief Change priority for the priority affinity SMP scheduler.
     129 *
     130 * @param[in] scheduler The scheduler of the thread.
     131 * @param[in] thread The associated thread.
     132 * @param[in] new_priority The new priority for the thread.
     133 * @param[in] prepend_it Append or prepend the thread to its priority FIFO.
     134 */
     135void _Scheduler_priority_affinity_SMP_Change_priority(
     136  const Scheduler_Control *scheduler,
     137  Thread_Control          *the_thread,
     138  Priority_Control         new_priority,
     139  bool                     prepend_it
     140);
     141
    101142/**
    102  * @brief Set affinity for the priority affinity smp scheduler.
     143 * @brief Set affinity for the priority affinity SMP scheduler.
    103144 *
    104145 * @param[in] scheduler The scheduler of the thread.
     
    107148 * @param[in] cpuset Affinity new affinity set.
    108149 *
    109  * @retval 0 Successful
     150 * @retval true if successful
     151 * @retval false if unsuccessful
    110152 */
    111153bool _Scheduler_priority_affinity_SMP_Set_affinity(
     
    113155  Thread_Control          *thread,
    114156  size_t                   cpusetsize,
    115   cpu_set_t               *cpuset
     157  const cpu_set_t         *cpuset
    116158);
    117159
     
    119161 * @brief Scheduler node specialization for Deterministic Priority Affinity SMP
    120162 * schedulers.
     163 *
     164 * This is a per thread structure.
    121165 */
    122166typedef struct {
     
    138182#endif /* __cplusplus */
    139183
    140 #endif /* _RTEMS_SCORE_SCHEDULERPRIORITYSMP_H */
     184#endif /* _RTEMS_SCORE_SCHEDULERPRIORITYAFFINITYSMP_H */
  • cpukit/score/include/rtems/score/schedulersmpimpl.h

    r647859e r238629f  
    276276
    277277typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
    278   Scheduler_Context *context
     278  Scheduler_Context *context,
     279  Thread_Control    *blocking
     280);
     281
     282typedef Thread_Control *( *Scheduler_SMP_Get_lowest_scheduled )(
     283  Scheduler_Context *context,
     284  Thread_Control    *thread,
     285  Chain_Node_order   order
    279286);
    280287
     
    303310  Scheduler_Context *context,
    304311  Thread_Control *thread_to_enqueue
     312);
     313
     314typedef void ( *Scheduler_SMP_Allocate_processor )(
     315  Scheduler_SMP_Context *self,
     316  Thread_Control *scheduled,
     317  Thread_Control *victim
    305318);
    306319
     
    383396}
    384397
    385 static inline void _Scheduler_SMP_Allocate_processor(
     398static void _Scheduler_SMP_Allocate_processor(
    386399  Scheduler_SMP_Context *self,
    387400  Thread_Control *scheduled,
     
    421434}
    422435
    423 static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
    424   Scheduler_SMP_Context *self
    425 )
    426 {
     436static Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
     437  Scheduler_Context *context,
     438  Thread_Control    *filter,
     439  Chain_Node_order   order
     440)
     441{
     442  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
    427443  Thread_Control *lowest_ready = NULL;
    428444  Chain_Control *scheduled = &self->Scheduled;
     
    431447    lowest_ready = (Thread_Control *) _Chain_Last( scheduled );
    432448  }
     449
     450  /*
     451   * _Scheduler_SMP_Enqueue_ordered() assumes that get_lowest_scheduled
     452   * helpers may return NULL. But this method never should.
     453   */
     454  _Assert( lowest_ready != NULL );
    433455
    434456  return lowest_ready;
     
    444466 * @param[in] order The order function.
    445467 * @param[in] insert_ready Function to insert a node into the set of ready
    446  * nodes.
     468 *   nodes.
    447469 * @param[in] insert_scheduled Function to insert a node into the set of
    448  * scheduled nodes.
     470 *   scheduled nodes.
    449471 * @param[in] move_from_scheduled_to_ready Function to move a node from the set
    450  * of scheduled nodes to the set of ready nodes.
     472 *   of scheduled nodes to the set of ready nodes.
     473 * @param[in] get_lowest_scheduled Function to select the thread from the
     474 *   scheduled nodes to replace. It may not be possible to find one.
     475 * @param[in] allocate_processor Function to allocate a processor to a thread
     476 *   based on the rules of the scheduler.
    451477 */
    452478static inline void _Scheduler_SMP_Enqueue_ordered(
    453   Scheduler_Context *context,
    454   Thread_Control *thread,
    455   Chain_Node_order order,
    456   Scheduler_SMP_Insert insert_ready,
    457   Scheduler_SMP_Insert insert_scheduled,
    458   Scheduler_SMP_Move move_from_scheduled_to_ready
     479  Scheduler_Context                 *context,
     480  Thread_Control                    *thread,
     481  Chain_Node_order                   order,
     482  Scheduler_SMP_Insert                insert_ready,
     483  Scheduler_SMP_Insert                insert_scheduled,
     484  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
     485  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
     486  Scheduler_SMP_Allocate_processor    allocate_processor
    459487)
    460488{
    461489  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
    462490  Thread_Control *lowest_scheduled =
    463     _Scheduler_SMP_Get_lowest_scheduled( self );
    464 
    465   _Assert( lowest_scheduled != NULL );
    466 
    467   if ( ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
     491    ( *get_lowest_scheduled )( context, thread, order );
     492
     493  /*
     494   *  get_lowest_scheduled can return a NULL if no scheduled threads
     495   *  should be removed from their processor based on the selection
     496   *  criteria. For example, this can occur when the affinity of the
     497   *  thread being enqueued schedules it against higher priority threads.
     498   *  A low priority thread with affinity can only consider the threads
     499   *  which are on the cores if has affinity for.
     500   *
     501   *  The get_lowest_scheduled helper should assert on not returning NULL
     502   *  if that is not possible for that scheduler.
     503   */
     504
     505  if ( lowest_scheduled &&
     506       ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
    468507    Scheduler_SMP_Node *lowest_scheduled_node =
    469508      _Scheduler_SMP_Node_get( lowest_scheduled );
     
    473512      SCHEDULER_SMP_NODE_READY
    474513    );
    475     _Scheduler_SMP_Allocate_processor( self, thread, lowest_scheduled );
     514    ( *allocate_processor )( self, thread, lowest_scheduled );
    476515    ( *insert_scheduled )( &self->Base, thread );
    477516    ( *move_from_scheduled_to_ready )( &self->Base, lowest_scheduled );
     
    490529 * @param[in] get_highest_ready Function to get the highest ready node.
    491530 * @param[in] insert_ready Function to insert a node into the set of ready
    492  * nodes.
     531 *   nodes.
    493532 * @param[in] insert_scheduled Function to insert a node into the set of
    494  * scheduled nodes.
     533 *   scheduled nodes.
    495534 * @param[in] move_from_ready_to_scheduled Function to move a node from the set
    496  * of ready nodes to the set of scheduled nodes.
     535 *   of ready nodes to the set of scheduled nodes.
     536 * @param[in] allocate_processor Function to allocate a processor to a thread
     537 *   based on the rules of the scheduler.
    497538 */
    498539static inline void _Scheduler_SMP_Enqueue_scheduled_ordered(
    499540  Scheduler_Context *context,
    500   Thread_Control *thread,
    501   Chain_Node_order order,
    502   Scheduler_SMP_Get_highest_ready get_highest_ready,
    503   Scheduler_SMP_Insert insert_ready,
    504   Scheduler_SMP_Insert insert_scheduled,
    505   Scheduler_SMP_Move move_from_ready_to_scheduled
     541  Thread_Control                   *thread,
     542  Chain_Node_order                  order,
     543  Scheduler_SMP_Get_highest_ready   get_highest_ready,
     544  Scheduler_SMP_Insert              insert_ready,
     545  Scheduler_SMP_Insert              insert_scheduled,
     546  Scheduler_SMP_Move                move_from_ready_to_scheduled,
     547  Scheduler_SMP_Allocate_processor  allocate_processor
    506548)
    507549{
    508550  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
    509551  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
    510   Thread_Control *highest_ready = ( *get_highest_ready )( &self->Base );
     552  Thread_Control *highest_ready =
     553    ( *get_highest_ready )( &self->Base, thread );
    511554
    512555  _Assert( highest_ready != NULL );
     
    520563  } else {
    521564    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    522     _Scheduler_SMP_Allocate_processor( self, highest_ready, thread );
     565    ( *allocate_processor) ( self, highest_ready, thread );
    523566    ( *insert_ready )( &self->Base, thread );
    524567    ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
     
    537580  Thread_Control *victim,
    538581  Scheduler_SMP_Get_highest_ready get_highest_ready,
    539   Scheduler_SMP_Move move_from_ready_to_scheduled
     582  Scheduler_SMP_Move move_from_ready_to_scheduled,
     583  Scheduler_SMP_Allocate_processor allocate_processor
    540584)
    541585{
    542586  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
    543   Thread_Control *highest_ready = ( *get_highest_ready )( &self->Base );
    544 
    545   _Scheduler_SMP_Allocate_processor( self, highest_ready, victim );
     587  Thread_Control *highest_ready =
     588    ( *get_highest_ready )( &self->Base, victim );
     589
     590  ( *allocate_processor )( self, highest_ready, victim );
    546591
    547592  ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
     
    564609  Scheduler_SMP_Extract extract_from_ready,
    565610  Scheduler_SMP_Get_highest_ready get_highest_ready,
    566   Scheduler_SMP_Move move_from_ready_to_scheduled
     611  Scheduler_SMP_Move move_from_ready_to_scheduled,
     612  Scheduler_SMP_Allocate_processor allocate_processor
    567613)
    568614{
     
    579625      thread,
    580626      get_highest_ready,
    581       move_from_ready_to_scheduled
     627      move_from_ready_to_scheduled,
     628      allocate_processor
    582629    );
    583630  } else {
  • cpukit/score/preinstall.am

    r647859e r238629f  
    407407PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpustdatomic.h
    408408
     409$(PROJECT_INCLUDE)/rtems/score/schedulerprioritysmpimpl.h: include/rtems/score/schedulerprioritysmpimpl.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
     410        $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/schedulerprioritysmpimpl.h
     411PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/schedulerprioritysmpimpl.h
     412
    409413$(PROJECT_INCLUDE)/rtems/score/schedulerpriorityaffinitysmp.h: include/rtems/score/schedulerpriorityaffinitysmp.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
    410414        $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/schedulerpriorityaffinitysmp.h
  • cpukit/score/src/schedulerpriorityaffinitysmp.c

    r647859e r238629f  
    2323#include <rtems/score/schedulerpriorityimpl.h>
    2424#include <rtems/score/schedulersmpimpl.h>
     25#include <rtems/score/schedulerprioritysmpimpl.h>
    2526#include <rtems/score/wkspace.h>
    2627#include <rtems/score/cpusetimpl.h>
    2728
     29#include <rtems/score/priority.h>
     30
     31/*
     32 * The following methods which initially were static in schedulerprioritysmp.c
     33 * are shared with this scheduler. They are now public so they can be shared.
     34 *
     35 *  + _Scheduler_priority_SMP_Get_self
     36 *  + _Scheduler_priority_SMP_Insert_ready_fifo
     37 *  + _Scheduler_priority_SMP_Insert_ready_lifo
     38 *  + _Scheduler_priority_SMP_Node_get
     39 *  + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
     40 *  + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
     41 *  + _Scheduler_priority_SMP_Extract_from_ready
     42 *  + _Scheduler_priority_SMP_Do_update
     43 */
     44
     45/*
     46 * This method returns the scheduler node for the specified thread
     47 * as a scheduler specific type.
     48 */
    2849static Scheduler_priority_affinity_SMP_Node *
    29 _Scheduler_priority_affinity_Node_get( Thread_Control *thread )
    30 {
    31   return ( Scheduler_priority_affinity_SMP_Node * )
    32     _Scheduler_Node_get( thread );
    33 }
    34 
     50_Scheduler_priority_affinity_SMP_Node_get(
     51  Thread_Control *thread
     52)
     53{
     54  return (Scheduler_priority_affinity_SMP_Node *) _Scheduler_Node_get( thread );
     55}
     56
     57/*
     58 * This method initializes the scheduler control information for
     59 * this scheduler instance.
     60 */
    3561void _Scheduler_priority_affinity_SMP_Node_initialize(
    3662  const Scheduler_Control *scheduler,
    37   Thread_Control          *the_thread
    38 )
    39 {
     63  Thread_Control          *thread
     64)
     65{
     66  Scheduler_SMP_Node *smp_node = _Scheduler_SMP_Node_get( thread );
     67
    4068  Scheduler_priority_affinity_SMP_Node *node =
    41     _Scheduler_priority_affinity_Node_get( the_thread );
    42 
    43   _Scheduler_SMP_Node_initialize( &node->Base.Base );
    44 
    45   node->Affinity = *_CPU_set_Default();
     69    _Scheduler_priority_affinity_SMP_Node_get( thread );
     70
     71  (scheduler);
     72
     73  /*
     74   *  All we add is affinity information to the basic SMP node.
     75   */
     76  _Scheduler_SMP_Node_initialize( smp_node );
     77
     78  node->Affinity     = *_CPU_set_Default();
    4679  node->Affinity.set = &node->Affinity.preallocated;
    4780}
    4881
     82/*
     83 * This method is slightly different from _Scheduler_SMP_Allocate_processor()
     84 * in that it does what it is asked to do. _Scheduler_SMP_Allocate_processor()
     85 * attempts to prevent migrations but does not take into account affinity
     86 */
     87static inline void _Scheduler_SMP_Allocate_processor_exact(
     88   Scheduler_SMP_Context *self,
     89   Thread_Control        *scheduled,
     90   Thread_Control        *victim
     91)
     92{
     93   Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
     94   Per_CPU_Control    *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
     95   Per_CPU_Control    *cpu_of_victim = _Thread_Get_CPU( victim );
     96   Per_CPU_Control    *cpu_self = _Per_CPU_Get();
     97
     98   _Scheduler_SMP_Node_change_state(
     99     scheduled_node,
     100     SCHEDULER_SMP_NODE_SCHEDULED
     101   );
     102
     103   _Thread_Set_CPU( scheduled, cpu_of_victim );
     104   _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, scheduled );
     105}
     106
     107/*
     108 * This method is unique to this scheduler because it takes into
     109 * account affinity as it determines the highest ready thread.
     110 * Since this is used to pick a new thread to replace the victim,
     111 * the highest ready thread must have affinity such that it can
     112 * be executed on the victim's processor.
     113 */
     114static Thread_Control *_Scheduler_priority_affinity_SMP_Get_highest_ready(
     115  Scheduler_Context *context,
     116  Thread_Control    *victim
     117)
     118{
     119  Scheduler_priority_SMP_Context *self =
     120    _Scheduler_priority_SMP_Get_self( context );
     121  Priority_Control                index;
     122  Thread_Control                 *highest = NULL;
     123  int                             victim_cpu;
     124
     125  /*
     126   * This is done when we need to check if reevaluations are needed.
     127   */
     128  if ( victim == NULL ) {
     129    return _Scheduler_priority_Ready_queue_first(
     130        &self->Bit_map,
     131        &self->Ready[ 0 ]
     132      );
     133  }
     134
     135  victim_cpu = _Per_CPU_Get_index( _Thread_Get_CPU( victim ) );
     136
     137  /**
     138   * @todo The deterministic priority scheduler structure is optimized
     139   * for insertion, extraction, and finding the highest priority
     140   * thread. Scanning the list of ready threads is not a purpose
     141   * for which it was optimized. There are optimizations to be
     142   * made in this loop.
     143   *
     144   * + by checking the major bit, we could potentially skip entire
     145   *   groups of 16.
     146   *
     147   * When using this scheduler as implemented, the application's
     148   * choice of numeric priorities and their distribution can have
     149   * an impact on performance.
     150   */
     151  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
     152        index <= PRIORITY_MAXIMUM;
     153        index++ )
     154  {
     155    Chain_Control   *chain =  &self->Ready[index];
     156    Chain_Node      *chain_node;
     157    for ( chain_node = _Chain_First( chain );
     158          chain_node != _Chain_Immutable_tail( chain ) ;
     159          chain_node = _Chain_Next( chain_node ) )
     160    {
     161      Thread_Control                       *thread;
     162      Scheduler_priority_affinity_SMP_Node *node;
     163
     164      thread = (Thread_Control *) chain_node;
     165      node = _Scheduler_priority_affinity_SMP_Node_get( thread );
     166
     167      /*
     168       * Can this thread run on this CPU?
     169       */
     170      if ( CPU_ISSET( victim_cpu, node->Affinity.set ) ) {
     171        highest = thread;
     172        break;
     173      }
     174    }
     175    if ( highest )
     176      break;
     177  }
     178
     179  _Assert( highest != NULL );
     180
     181  return highest;
     182}
     183
     184/*
     185 * This method is very similar to _Scheduler_priority_affinity_SMP_Block
     186 * but has the difference that is invokes this scheduler's
     187 * get_highest_ready() support method.
     188 */
     189void _Scheduler_priority_affinity_SMP_Block(
     190  const Scheduler_Control *scheduler,
     191  Thread_Control *thread
     192)
     193{
     194  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
     195
     196  _Scheduler_SMP_Block(
     197    context,
     198    thread,
     199    _Scheduler_priority_SMP_Extract_from_ready,
     200    _Scheduler_priority_affinity_SMP_Get_highest_ready,
     201    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
     202    _Scheduler_SMP_Allocate_processor_exact
     203  );
     204
     205  /*
     206   * Since this removed a single thread from the scheduled set
     207   * and selected the most appropriate thread from the ready
     208   * set to replace it, there should be no need for thread
     209   * migrations.
     210   */
     211}
     212
     213/*
     214 * This method is unique to this scheduler because it must take into
     215 * account affinity as it searches for the lowest priority scheduled
     216 * thread. It ignores those which cannot be replaced by the filter
     217 * thread because the potential victim thread does not have affinity
     218 * for that processor.
     219 */
     220static Thread_Control *_Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
     221  Scheduler_Context *context,
     222  Thread_Control    *filter,
     223  Chain_Node_order   order
     224)
     225{
     226  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
     227  Thread_Control  *lowest_scheduled = NULL;
     228  Thread_Control  *thread = NULL;
     229  Chain_Control   *scheduled = &self->Scheduled;
     230  Scheduler_priority_affinity_SMP_Node *node =
     231    _Scheduler_priority_affinity_SMP_Node_get( filter );
     232
     233  for ( thread =  (Thread_Control *) _Chain_Last( scheduled );
     234        (Chain_Node *) thread != _Chain_Immutable_head( scheduled ) ;
     235        thread = (Thread_Control *) _Chain_Previous( &thread->Object.Node ) ) {
     236    int   cpu_index;
     237
     238    /*
     239     * If we didn't find a thread which is of equal or lower importance
     240     * than filter thread is, then we can't schedule the filter thread
     241     * to execute.
     242     */
     243    if ( (*order)(&thread->Object.Node, &filter->Object.Node) )
     244      break;
     245
     246    /* cpu_index is the processor number thread is executing on */
     247    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );
     248
     249    if ( CPU_ISSET( cpu_index, node->Affinity.set ) ) {
     250      lowest_scheduled = thread;
     251      break;
     252    }
     253
     254  }
     255
     256  return lowest_scheduled;
     257}
     258
     259/*
     260 * This method is unique to this scheduler because it must pass
     261 * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
     262 * _Scheduler_SMP_Enqueue_ordered.
     263 */
     264static void _Scheduler_priority_affinity_SMP_Enqueue_fifo(
     265  Scheduler_Context *context,
     266  Thread_Control *thread
     267)
     268{
     269  _Scheduler_SMP_Enqueue_ordered(
     270    context,
     271    thread,
     272    _Scheduler_simple_Insert_priority_fifo_order,
     273    _Scheduler_priority_SMP_Insert_ready_fifo,
     274    _Scheduler_SMP_Insert_scheduled_fifo,
     275    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
     276    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
     277    _Scheduler_SMP_Allocate_processor_exact
     278  );
     279}
     280
     281/*
     282 * This method is invoked at the end of certain scheduling operations
     283 * to ensure that the highest priority ready thread cannot be scheduled
     284 * to execute. When we schedule with affinity, there is the possibility
     285 * that we need to migrate a thread to another core to ensure that the
     286 * highest priority ready threads are in fact scheduled.
     287 */
     288static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
     289  Scheduler_Context *context
     290)
     291{
     292  Thread_Control        *lowest_scheduled;
     293  Thread_Control        *highest_ready;
     294  Scheduler_SMP_Node    *lowest_scheduled_node;
     295  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
     296
     297  while (1) {
     298    highest_ready =
     299      _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );
     300    lowest_scheduled = _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
     301      context,
     302      highest_ready,
     303      _Scheduler_simple_Insert_priority_lifo_order
     304    );
     305
     306    /*
     307     * If we can't find a thread to displace from the scheduled set,
     308     * then we have placed all the highest priority threads possible
     309     * in the scheduled set.
     310     *
     311     * We found the absolute highest priority thread without
     312     * considering affinity. But now we have to consider that thread's
     313     * affinity as we look to place it.
     314     */
     315    if ( lowest_scheduled == NULL )
     316      break;
     317
     318    /*
     319     * But if we found a thread which is lower priority than one
     320     * in the ready set, then we need to swap them out.
     321     */
     322    lowest_scheduled_node = _Scheduler_SMP_Node_get( lowest_scheduled );
     323
     324    _Scheduler_SMP_Node_change_state(
     325      lowest_scheduled_node,
     326      SCHEDULER_SMP_NODE_READY
     327    );
     328
     329    _Scheduler_SMP_Allocate_processor_exact(
     330      self,
     331      highest_ready,
     332      lowest_scheduled
     333    );
     334
     335    _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
     336      context,
     337      highest_ready
     338    );
     339
     340    _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
     341      &self->Base,
     342      lowest_scheduled
     343    );
     344  }
     345}
     346
     347/*
     348 * This is the public scheduler specific Unblock operation.
     349 */
     350void _Scheduler_priority_affinity_SMP_Unblock(
     351  const Scheduler_Control *scheduler,
     352  Thread_Control *thread
     353)
     354{
     355  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
     356
     357  _Scheduler_SMP_Unblock(
     358    context,
     359    thread,
     360    _Scheduler_priority_affinity_SMP_Enqueue_fifo
     361  );
     362
     363  /*
     364   * Perform any thread migrations that are needed due to these changes.
     365   */
     366  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
     367}
     368
     369/*
     370 *  This is unique to this scheduler because it passes scheduler specific
     371 *  get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
     372 */
     373static void _Scheduler_priority_affinity_SMP_Enqueue_ordered(
     374  Scheduler_Context     *context,
     375  Thread_Control        *thread,
     376  Chain_Node_order       order,
     377  Scheduler_SMP_Insert   insert_ready,
     378  Scheduler_SMP_Insert   insert_scheduled
     379)
     380{
     381  _Scheduler_SMP_Enqueue_ordered(
     382    context,
     383    thread,
     384    order,
     385    insert_ready,
     386    insert_scheduled,
     387    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
     388    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
     389    _Scheduler_SMP_Allocate_processor_exact
     390  );
     391}
     392
     393/*
     394 *  This is unique to this scheduler because it is on the path
     395 *  to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
     396 *  invokes a scheduler unique get_lowest_scheduled helper.
     397 */
     398static void _Scheduler_priority_affinity_SMP_Enqueue_lifo(
     399  Scheduler_Context *context,
     400  Thread_Control *thread
     401)
     402{
     403  _Scheduler_priority_affinity_SMP_Enqueue_ordered(
     404    context,
     405    thread,
     406    _Scheduler_simple_Insert_priority_lifo_order,
     407    _Scheduler_priority_SMP_Insert_ready_lifo,
     408    _Scheduler_SMP_Insert_scheduled_lifo
     409  );
     410}
     411
     412/*
     413 * This method is unique to this scheduler because it must
     414 * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
     415 * this scheduler's get_highest_ready() helper.
     416 */
     417static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
     418  Scheduler_Context *context,
     419  Thread_Control *thread,
     420  Chain_Node_order order,
     421  Scheduler_SMP_Insert insert_ready,
     422  Scheduler_SMP_Insert insert_scheduled
     423)
     424{
     425  _Scheduler_SMP_Enqueue_scheduled_ordered(
     426    context,
     427    thread,
     428    order,
     429    _Scheduler_priority_affinity_SMP_Get_highest_ready,
     430    insert_ready,
     431    insert_scheduled,
     432    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
     433    _Scheduler_SMP_Allocate_processor_exact
     434  );
     435}
     436
     437/*
     438 *  This is unique to this scheduler because it is on the path
     439 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled__ordered() which
     440 *  invokes a scheduler unique get_lowest_scheduled helper.
     441 */
     442static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
     443  Scheduler_Context *context,
     444  Thread_Control *thread
     445)
     446{
     447  _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
     448    context,
     449    thread,
     450    _Scheduler_simple_Insert_priority_lifo_order,
     451    _Scheduler_priority_SMP_Insert_ready_lifo,
     452    _Scheduler_SMP_Insert_scheduled_lifo
     453  );
     454}
     455
     456/*
     457 *  This is unique to this scheduler because it is on the path
     458 *  to _Scheduler_priority_affinity_SMP_Enqueue_scheduled__ordered() which
     459 *  invokes a scheduler unique get_lowest_scheduled helper.
     460 */
     461static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
     462  Scheduler_Context *context,
     463  Thread_Control *thread
     464)
     465{
     466  _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
     467    context,
     468    thread,
     469    _Scheduler_simple_Insert_priority_fifo_order,
     470    _Scheduler_priority_SMP_Insert_ready_fifo,
     471    _Scheduler_SMP_Insert_scheduled_fifo
     472  );
     473}
     474
     475/*
     476 * This is the public scheduler specific Change Priority operation.
     477 */
     478void _Scheduler_priority_affinity_SMP_Change_priority(
     479  const Scheduler_Control *scheduler,
     480  Thread_Control          *thread,
     481  Priority_Control         new_priority,
     482  bool                     prepend_it
     483)
     484{
     485  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
     486
     487  _Scheduler_SMP_Change_priority(
     488    context,
     489    thread,
     490    new_priority,
     491    prepend_it,
     492    _Scheduler_priority_SMP_Extract_from_ready,
     493    _Scheduler_priority_SMP_Do_update,
     494    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
     495    _Scheduler_priority_affinity_SMP_Enqueue_lifo,
     496    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
     497    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo
     498  );
     499
     500  /*
     501   * Perform any thread migrations that are needed due to these changes.
     502   */
     503  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
     504}
     505
     506/*
     507 * This is the public scheduler specific Get Affinity operation.
     508 */
    49509bool _Scheduler_priority_affinity_SMP_Get_affinity(
    50510  const Scheduler_Control *scheduler,
     
    55515{
    56516  Scheduler_priority_affinity_SMP_Node *node =
    57     _Scheduler_priority_affinity_Node_get(thread);
     517    _Scheduler_priority_affinity_SMP_Node_get(thread);
    58518
    59519  (void) scheduler;
     
    64524
    65525  CPU_COPY( cpuset, node->Affinity.set );
    66   return true; 
     526  return true;
    67527}
    68528
     
    71531  Thread_Control          *thread,
    72532  size_t                   cpusetsize,
    73   cpu_set_t               *cpuset
     533  const cpu_set_t         *cpuset
    74534)
    75535{
    76536  Scheduler_priority_affinity_SMP_Node *node =
    77     _Scheduler_priority_affinity_Node_get(thread);
     537    _Scheduler_priority_affinity_SMP_Node_get(thread);
    78538
    79539  (void) scheduler;
    80  
    81   if ( ! _CPU_set_Is_valid( cpuset, cpusetsize ) ) {
     540
     541  /*
      542   * Validate that the cpuset meets basic requirements.
     543   */
     544  if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) ) {
    82545    return false;
    83546  }
    84547
    85   CPU_COPY( node->Affinity.set, cpuset );
    86  
     548  /*
     549   * The old and new set are the same, there is no point in
     550   * doing anything.
     551   */
     552  if ( CPU_EQUAL_S( cpusetsize, cpuset, node->Affinity.set ) )
     553    return true;
     554
     555  _Thread_Set_state( thread, STATES_MIGRATING );
     556    CPU_COPY( node->Affinity.set, cpuset );
     557  _Thread_Clear_state( thread, STATES_MIGRATING );
     558
    87559  return true;
    88560}
  • cpukit/score/src/schedulerprioritysmp.c

    r647859e r238629f  
    2727#include <rtems/score/schedulerprioritysmp.h>
    2828#include <rtems/score/schedulerpriorityimpl.h>
     29#include <rtems/score/schedulerprioritysmpimpl.h>
    2930#include <rtems/score/schedulersmpimpl.h>
    3031
     
    3536}
    3637
    37 static Scheduler_priority_SMP_Context *
    38 _Scheduler_priority_SMP_Get_self( Scheduler_Context *context )
     38Scheduler_priority_SMP_Context *_Scheduler_priority_SMP_Get_self(
     39  Scheduler_Context *context
     40)
    3941{
    4042  return (Scheduler_priority_SMP_Context *) context;
    4143}
    4244
    43 static Scheduler_priority_SMP_Node *_Scheduler_priority_SMP_Node_get(
     45Scheduler_priority_SMP_Node *_Scheduler_priority_SMP_Node_get(
    4446  Thread_Control *thread
    4547)
     
    7577}
    7678
    77 static void _Scheduler_priority_SMP_Do_update(
     79void _Scheduler_priority_SMP_Do_update(
    7880  Scheduler_Context *context,
    7981  Scheduler_Node *base_node,
     
    107109
    108110static Thread_Control *_Scheduler_priority_SMP_Get_highest_ready(
    109   Scheduler_Context *context
    110 )
    111 {
    112   Scheduler_priority_SMP_Context *self =
    113     _Scheduler_priority_SMP_Get_self( context );
     111  Scheduler_Context *context,
     112  Thread_Control    *thread
     113)
     114{
     115  Scheduler_priority_SMP_Context *self =
     116    _Scheduler_priority_SMP_Get_self( context );
     117
     118  (void) thread;
    114119
    115120  return _Scheduler_priority_Ready_queue_first(
     
    119124}
    120125
    121 static void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
     126void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
    122127  Scheduler_Context *context,
    123128  Thread_Control *scheduled_to_ready
     
    137142}
    138143
    139 static void _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
     144void _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
    140145  Scheduler_Context *context,
    141146  Thread_Control *ready_to_scheduled
     
    158163}
    159164
    160 static void _Scheduler_priority_SMP_Insert_ready_lifo(
     165void _Scheduler_priority_SMP_Insert_ready_lifo(
    161166  Scheduler_Context *context,
    162167  Thread_Control *thread
     
    175180}
    176181
    177 static void _Scheduler_priority_SMP_Insert_ready_fifo(
     182void _Scheduler_priority_SMP_Insert_ready_fifo(
    178183  Scheduler_Context *context,
    179184  Thread_Control *thread
     
    192197}
    193198
    194 static void _Scheduler_priority_SMP_Extract_from_ready(
     199void _Scheduler_priority_SMP_Extract_from_ready(
    195200  Scheduler_Context *context,
    196201  Thread_Control *thread
     
    221226    _Scheduler_priority_SMP_Extract_from_ready,
    222227    _Scheduler_priority_SMP_Get_highest_ready,
    223     _Scheduler_priority_SMP_Move_from_ready_to_scheduled
     228    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
     229    _Scheduler_SMP_Allocate_processor
    224230  );
    225231}
     
    239245    insert_ready,
    240246    insert_scheduled,
    241     _Scheduler_priority_SMP_Move_from_scheduled_to_ready
     247    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
     248    _Scheduler_SMP_Get_lowest_scheduled,
     249    _Scheduler_SMP_Allocate_processor
    242250  );
    243251}
     
    286294    insert_ready,
    287295    insert_scheduled,
    288     _Scheduler_priority_SMP_Move_from_ready_to_scheduled
     296    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
     297    _Scheduler_SMP_Allocate_processor
    289298  );
    290299}
  • cpukit/score/src/schedulersimplesmp.c

    r647859e r238629f  
    6565
    6666static Thread_Control *_Scheduler_simple_SMP_Get_highest_ready(
    67   Scheduler_Context *context
    68 )
    69 {
    70   Scheduler_simple_SMP_Context *self =
    71     _Scheduler_simple_SMP_Get_self( context );
     67  Scheduler_Context *context,
     68  Thread_Control    *thread
     69)
     70{
     71  Scheduler_simple_SMP_Context *self =
     72    _Scheduler_simple_SMP_Get_self( context );
     73
     74  (void) thread;
    7275
    7376  return (Thread_Control *) _Chain_First( &self->Ready );
     
    156159    _Scheduler_simple_SMP_Extract_from_ready,
    157160    _Scheduler_simple_SMP_Get_highest_ready,
    158     _Scheduler_simple_SMP_Move_from_ready_to_scheduled
     161    _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
     162    _Scheduler_SMP_Allocate_processor
    159163  );
    160164}
     
    174178    insert_ready,
    175179    insert_scheduled,
    176     _Scheduler_simple_SMP_Move_from_scheduled_to_ready
     180    _Scheduler_simple_SMP_Move_from_scheduled_to_ready,
     181    _Scheduler_SMP_Get_lowest_scheduled,
     182    _Scheduler_SMP_Allocate_processor
    177183  );
    178184}
     
    221227    insert_ready,
    222228    insert_scheduled,
    223     _Scheduler_simple_SMP_Move_from_ready_to_scheduled
     229    _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
     230    _Scheduler_SMP_Allocate_processor
    224231  );
    225232}
Note: See TracChangeset for help on using the changeset viewer.