Changeset 34487537 in rtems


Ignore:
Timestamp:
Jul 4, 2017, 7:57:30 AM (2 years ago)
Author:
Sebastian Huber <sebastian.huber@…>
Branches:
master
Children:
4a1bdd30
Parents:
d19dc071
git-author:
Sebastian Huber <sebastian.huber@…> (07/04/17 07:57:30)
git-committer:
Sebastian Huber <sebastian.huber@…> (07/10/17 05:49:36)
Message:

score: Add simple affinity support to EDF SMP

Update #3059.

Files:
8 added
14 edited

Legend:

Unmodified
Added
Removed
  • cpukit/sapi/include/confdefs.h

    rd19dc071 r34487537  
    993993  #if !defined(CONFIGURE_SCHEDULER_CONTROLS)
    994994    /** Configure the context needed by the scheduler instance */
    995     #define CONFIGURE_SCHEDULER_CONTEXT RTEMS_SCHEDULER_CONTEXT_EDF_SMP(dflt)
     995    #define CONFIGURE_SCHEDULER_CONTEXT \
     996      RTEMS_SCHEDULER_CONTEXT_EDF_SMP(dflt, CONFIGURE_MAXIMUM_PROCESSORS)
    996997
    997998    /** Configure the controls for this scheduler instance */
  • cpukit/sapi/include/rtems/scheduler.h

    rd19dc071 r34487537  
    100100    RTEMS_SCHEDULER_CONTEXT_NAME( EDF_SMP_ ## name )
    101101
    102   #define RTEMS_SCHEDULER_CONTEXT_EDF_SMP( name ) \
    103     static Scheduler_EDF_SMP_Context RTEMS_SCHEDULER_CONTEXT_EDF_SMP_NAME( name )
     102  #define RTEMS_SCHEDULER_CONTEXT_EDF_SMP( name, max_cpu_count ) \
     103    static struct { \
     104      Scheduler_EDF_SMP_Context Base; \
     105      Scheduler_EDF_SMP_Ready_queue Ready[ ( max_cpu_count ) + 1 ]; \
     106    } RTEMS_SCHEDULER_CONTEXT_EDF_SMP_NAME( name )
    104107
    105108  #define RTEMS_SCHEDULER_CONTROL_EDF_SMP( name, obj_name ) \
    106109    { \
    107       &RTEMS_SCHEDULER_CONTEXT_EDF_SMP_NAME( name ).Base.Base, \
     110      &RTEMS_SCHEDULER_CONTEXT_EDF_SMP_NAME( name ).Base.Base.Base, \
    108111      SCHEDULER_EDF_SMP_ENTRY_POINTS, \
    109112      SCHEDULER_EDF_MAXIMUM_PRIORITY, \
  • cpukit/score/include/rtems/score/scheduleredfsmp.h

    rd19dc071 r34487537  
    3535
    3636typedef struct {
     37  Scheduler_SMP_Node Base;
     38
     39  /**
     40   * @brief Generation number to ensure FIFO/LIFO order for threads of the same
     41   * priority across different ready queues.
     42   */
     43  int64_t generation;
     44
     45  /**
     46   * @brief The ready queue index depending on the processor affinity of the thread.
     47   *
     48   * The ready queue index zero is used for threads with a one-to-all thread
     49   * processor affinity.  Threads with a one-to-one processor affinity use the
     50   * processor index plus one as the ready queue index.
     51   */
     52  uint32_t ready_queue_index;
     53} Scheduler_EDF_SMP_Node;
     54
     55typedef struct {
     56  /**
     57   * @brief Chain node for Scheduler_SMP_Context::Affine_queues.
     58   */
     59  Chain_Node Node;
     60
     61  /**
     62   * @brief The ready threads of the corresponding affinity.
     63   */
     64  RBTree_Control Queue;
     65
     66  /**
     67   * @brief The scheduled thread of the corresponding processor.
     68   */
     69  Scheduler_EDF_SMP_Node *scheduled;
     70} Scheduler_EDF_SMP_Ready_queue;
     71
     72typedef struct {
    3773  Scheduler_SMP_Context Base;
    38   RBTree_Control        Ready;
     74
     75  /**
     76   * @brief Current generation for FIFO/LIFO ordering.
     77   */
     78  int64_t generations[ 2 ];
     79
     80  /**
     81   * @brief Chain of ready queues with affine threads to determine the highest
     82   * priority ready thread.
     83   */
     84  Chain_Control Affine_queues;
     85
     86  /**
     87   * @brief A table with ready queues.
     88   *
     89   * The index zero queue is used for threads with a one-to-all processor
     90   * affinity.  Index one corresponds to processor index zero, and so on.
     91   */
     92  Scheduler_EDF_SMP_Ready_queue Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
    3993} Scheduler_EDF_SMP_Context;
    40 
    41 typedef struct {
    42   Scheduler_SMP_Node Base;
    43 } Scheduler_EDF_SMP_Node;
    4494
    4595#define SCHEDULER_EDF_SMP_ENTRY_POINTS \
     
    63113    _Scheduler_EDF_Cancel_job, \
    64114    _Scheduler_default_Tick, \
    65     _Scheduler_SMP_Start_idle \
    66     SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
     115    _Scheduler_EDF_SMP_Start_idle, \
     116    _Scheduler_EDF_SMP_Set_affinity \
    67117  }
    68118
     
    127177  Thread_Control          *thread,
    128178  Scheduler_Node          *node
     179);
     180
     181void _Scheduler_EDF_SMP_Start_idle(
     182  const Scheduler_Control *scheduler,
     183  Thread_Control          *idle,
     184  struct Per_CPU_Control  *cpu
     185);
     186
     187bool _Scheduler_EDF_SMP_Set_affinity(
     188  const Scheduler_Control *scheduler,
     189  Thread_Control          *thread,
     190  Scheduler_Node          *node,
     191  const Processor_mask    *affinity
    129192);
    130193
  • cpukit/score/include/rtems/score/schedulersmpimpl.h

    rd19dc071 r34487537  
    88
    99/*
    10  * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
     10 * Copyright (c) 2013, 2017 embedded brains GmbH.  All rights reserved.
    1111 *
    1212 *  embedded brains GmbH
     
    318318);
    319319
     320typedef void ( *Scheduler_SMP_Set_affinity )(
     321  Scheduler_Context *context,
     322  Scheduler_Node    *node,
     323  void              *arg
     324);
     325
    320326typedef bool ( *Scheduler_SMP_Enqueue )(
    321327  Scheduler_Context *context,
     
    329335  Per_CPU_Control   *victim_cpu
    330336);
     337
     338typedef void ( *Scheduler_SMP_Register_idle )(
     339  Scheduler_Context *context,
     340  Scheduler_Node    *idle,
     341  Per_CPU_Control   *cpu
     342);
     343
     344static inline void _Scheduler_SMP_Do_nothing_register_idle(
     345  Scheduler_Context *context,
     346  Scheduler_Node    *idle,
     347  Per_CPU_Control   *cpu
     348)
     349{
     350  (void) context;
     351  (void) idle;
     352  (void) cpu;
     353}
    331354
    332355static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
     
    887910        victim,
    888911        victim_cpu,
     912        allocate_processor
     913      );
     914
     915      ( *move_from_ready_to_scheduled )( context, highest_ready );
     916    } else {
     917      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
     918
     919      _Scheduler_SMP_Node_change_state(
     920        highest_ready,
     921        SCHEDULER_SMP_NODE_BLOCKED
     922      );
     923
     924      ( *extract_from_ready )( context, highest_ready );
     925    }
     926  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
     927}
     928
     929static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
     930  Scheduler_Context                *context,
     931  Scheduler_Node                   *victim,
     932  Per_CPU_Control                  *victim_cpu,
     933  Scheduler_SMP_Extract             extract_from_ready,
     934  Scheduler_SMP_Get_highest_ready   get_highest_ready,
     935  Scheduler_SMP_Move                move_from_ready_to_scheduled,
     936  Scheduler_SMP_Allocate_processor  allocate_processor
     937)
     938{
     939  Scheduler_Try_to_schedule_action action;
     940
     941  do {
     942    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
     943
     944    action = _Scheduler_Try_to_schedule_node(
     945      context,
     946      highest_ready,
     947      NULL,
     948      _Scheduler_SMP_Get_idle_thread
     949    );
     950
     951    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
     952      _Scheduler_SMP_Preempt(
     953        context,
     954        highest_ready,
     955        victim,
    889956        allocate_processor
    890957      );
     
    12751342}
    12761343
     1344static inline void _Scheduler_SMP_Do_start_idle(
     1345  Scheduler_Context           *context,
     1346  Thread_Control              *idle,
     1347  Per_CPU_Control             *cpu,
     1348  Scheduler_SMP_Register_idle  register_idle
     1349)
     1350{
     1351  Scheduler_SMP_Context *self;
     1352  Scheduler_SMP_Node    *node;
     1353
     1354  self = _Scheduler_SMP_Get_self( context );
     1355  node = _Scheduler_SMP_Thread_get_node( idle );
     1356
     1357  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
     1358  node->state = SCHEDULER_SMP_NODE_SCHEDULED;
     1359
     1360  _Thread_Set_CPU( idle, cpu );
     1361  ( *register_idle )( context, &node->Base, cpu );
     1362  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
     1363  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
     1364}
     1365
    12771366static inline void _Scheduler_SMP_Add_processor(
    1278   Scheduler_Context       *context,
    1279   Thread_Control          *idle,
    1280   Scheduler_SMP_Has_ready  has_ready,
    1281   Scheduler_SMP_Enqueue    enqueue_scheduled_fifo
     1367  Scheduler_Context           *context,
     1368  Thread_Control              *idle,
     1369  Scheduler_SMP_Has_ready      has_ready,
     1370  Scheduler_SMP_Enqueue        enqueue_scheduled_fifo,
     1371  Scheduler_SMP_Register_idle  register_idle
    12821372)
    12831373{
     
    12901380  node = _Thread_Scheduler_get_home_node( idle );
    12911381  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
     1382  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
    12921383
    12931384  if ( ( *has_ready )( &self->Base ) ) {
     
    13561447}
    13571448
     1449static inline void _Scheduler_SMP_Set_affinity(
     1450  Scheduler_Context               *context,
     1451  Thread_Control                  *thread,
     1452  Scheduler_Node                  *node,
     1453  void                            *arg,
     1454  Scheduler_SMP_Set_affinity       set_affinity,
     1455  Scheduler_SMP_Extract            extract_from_ready,
     1456  Scheduler_SMP_Get_highest_ready  get_highest_ready,
     1457  Scheduler_SMP_Move               move_from_ready_to_scheduled,
     1458  Scheduler_SMP_Enqueue            enqueue_fifo,
     1459  Scheduler_SMP_Allocate_processor allocate_processor
     1460)
     1461{
     1462  Scheduler_SMP_Node_state node_state;
     1463
     1464  node_state = _Scheduler_SMP_Node_state( node );
     1465
     1466  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
     1467    _Scheduler_SMP_Extract_from_scheduled( node );
     1468    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
     1469      context,
     1470      node,
     1471      _Thread_Get_CPU( thread ),
     1472      extract_from_ready,
     1473      get_highest_ready,
     1474      move_from_ready_to_scheduled,
     1475      allocate_processor
     1476    );
     1477    ( *set_affinity )( context, node, arg );
     1478    ( *enqueue_fifo )( context, node );
     1479  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
     1480    ( *extract_from_ready )( context, node );
     1481    ( *set_affinity )( context, node, arg );
     1482    ( *enqueue_fifo )( context, node );
     1483  } else {
     1484    ( *set_affinity )( context, node, arg );
     1485  }
     1486}
     1487
    13581488/** @} */
    13591489
  • cpukit/score/src/scheduleredfsmp.c

    rd19dc071 r34487537  
    2222#include <rtems/score/schedulersmpimpl.h>
    2323
    24 static Scheduler_EDF_SMP_Context *
     24static inline Scheduler_EDF_SMP_Context *
    2525_Scheduler_EDF_SMP_Get_context( const Scheduler_Control *scheduler )
    2626{
     
    2828}
    2929
    30 static Scheduler_EDF_SMP_Context *
     30static inline Scheduler_EDF_SMP_Context *
    3131_Scheduler_EDF_SMP_Get_self( Scheduler_Context *context )
    3232{
     
    8484
    8585  _Scheduler_SMP_Initialize( &self->Base );
    86   _RBTree_Initialize_empty( &self->Ready );
     86  _Chain_Initialize_empty( &self->Affine_queues );
     87  /* The ready queues are zero initialized and thus empty */
    8788}
    8889
     
    100101}
    101102
    102 static void _Scheduler_EDF_SMP_Do_update(
     103static inline void _Scheduler_EDF_SMP_Do_update(
    103104  Scheduler_Context *context,
    104105  Scheduler_Node    *node,
     
    114115}
    115116
    116 static bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
     117static inline bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
    117118{
    118119  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
    119120
    120   return !_RBTree_Is_empty( &self->Ready );
    121 }
    122 
    123 static Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
    124   Scheduler_Context *context,
    125   Scheduler_Node    *node
    126 )
    127 {
    128   Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
    129   Scheduler_Node *first = (Scheduler_Node *) _RBTree_Minimum( &self->Ready );
    130 
    131   (void) node;
    132 
    133   _Assert( &first->Node != NULL );
    134 
    135   return first;
    136 }
    137 
    138 static void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
    139   Scheduler_Context *context,
    140   Scheduler_Node    *scheduled_to_ready
    141 )
    142 {
    143   Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
    144   Scheduler_EDF_SMP_Node *node =
    145     _Scheduler_EDF_SMP_Node_downcast( scheduled_to_ready );
    146 
    147   _Chain_Extract_unprotected( &node->Base.Base.Node.Chain );
     121  return !_RBTree_Is_empty( &self->Ready[ 0 ].Queue );
     122}
     123
     124static inline bool _Scheduler_EDF_SMP_Overall_less(
     125  const Scheduler_EDF_SMP_Node *left,
     126  const Scheduler_EDF_SMP_Node *right
     127)
     128{
     129  Priority_Control lp;
     130  Priority_Control rp;
     131
     132  lp = left->Base.priority;
     133  rp = right->Base.priority;
     134
     135  return lp < rp || (lp == rp && left->generation < right->generation );
     136}
     137
     138static inline Scheduler_EDF_SMP_Node *
     139_Scheduler_EDF_SMP_Challenge_highest_ready(
     140  Scheduler_EDF_SMP_Context *self,
     141  Scheduler_EDF_SMP_Node    *highest_ready,
     142  RBTree_Control            *ready_queue
     143)
     144{
     145  Scheduler_EDF_SMP_Node *other;
     146
     147  other = (Scheduler_EDF_SMP_Node *) _RBTree_Minimum( ready_queue );
     148  _Assert( other != NULL );
     149
     150  if ( _Scheduler_EDF_SMP_Overall_less( other, highest_ready ) ) {
     151    return other;
     152  }
     153
     154  return highest_ready;
     155}
     156
     157static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
     158  Scheduler_Context *context,
     159  Scheduler_Node    *filter
     160)
     161{
     162  Scheduler_EDF_SMP_Context *self;
     163  Scheduler_EDF_SMP_Node    *highest_ready;
     164  Scheduler_EDF_SMP_Node    *node;
     165  uint32_t                   rqi;
     166  const Chain_Node          *tail;
     167  Chain_Node                *next;
     168
     169  self = _Scheduler_EDF_SMP_Get_self( context );
     170  highest_ready = (Scheduler_EDF_SMP_Node *)
     171    _RBTree_Minimum( &self->Ready[ 0 ].Queue );
     172  _Assert( highest_ready != NULL );
     173
     174  /*
     175   * The filter node is a scheduled node which is no longer on the scheduled
     176   * chain.  In case this is an affine thread, then we have to check the
     177   * corresponding affine ready queue.
     178   */
     179
     180  node = (Scheduler_EDF_SMP_Node *) filter;
     181  rqi = node->ready_queue_index;
     182
     183  if ( rqi != 0 && !_RBTree_Is_empty( &self->Ready[ rqi ].Queue ) ) {
     184    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
     185      self,
     186      highest_ready,
     187      &self->Ready[ rqi ].Queue
     188    );
     189  }
     190
     191  tail = _Chain_Immutable_tail( &self->Affine_queues );
     192  next = _Chain_First( &self->Affine_queues );
     193
     194  while ( next != tail ) {
     195    Scheduler_EDF_SMP_Ready_queue *ready_queue;
     196
     197    ready_queue = (Scheduler_EDF_SMP_Ready_queue *) next;
     198    highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
     199      self,
     200      highest_ready,
     201      &ready_queue->Queue
     202    );
     203
     204    next = _Chain_Next( next );
     205  }
     206
     207  return &highest_ready->Base.Base;
     208}
     209
     210static inline void _Scheduler_EDF_SMP_Set_scheduled(
     211  Scheduler_EDF_SMP_Context *self,
     212  Scheduler_EDF_SMP_Node    *scheduled,
     213  const Per_CPU_Control     *cpu
     214)
     215{
     216  self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].scheduled = scheduled;
     217}
     218
     219static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
     220  const Scheduler_EDF_SMP_Context *self,
     221  uint32_t                         rqi
     222)
     223{
     224  return self->Ready[ rqi ].scheduled;
     225}
     226
     227static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
     228  Scheduler_Context *context,
     229  Scheduler_Node    *filter_base,
     230  Chain_Node_order   order
     231)
     232{
     233  Scheduler_EDF_SMP_Node *filter;
     234  uint32_t                rqi;
     235
     236  filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
     237  rqi = filter->ready_queue_index;
     238
     239  if ( rqi != 0 ) {
     240    Scheduler_EDF_SMP_Context *self;
     241    Scheduler_EDF_SMP_Node    *node;
     242
     243    self = _Scheduler_EDF_SMP_Get_self( context );
     244    node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
     245
     246    if ( node->ready_queue_index > 0 ) {
     247      _Assert( node->ready_queue_index == rqi );
     248      return &node->Base.Base;
     249    }
     250  }
     251
     252  return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base, order );
     253}
     254
     255static inline void _Scheduler_EDF_SMP_Insert_ready(
     256  Scheduler_Context *context,
     257  Scheduler_Node    *node_base,
     258  size_t             generation_index,
     259  int                increment,
     260  bool            ( *less )( const void *, const RBTree_Node * )
     261)
     262{
     263  Scheduler_EDF_SMP_Context     *self;
     264  Scheduler_EDF_SMP_Node        *node;
     265  uint32_t                       rqi;
     266  Scheduler_EDF_SMP_Ready_queue *ready_queue;
     267  int64_t                        generation;
     268
     269  self = _Scheduler_EDF_SMP_Get_self( context );
     270  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
     271  rqi = node->ready_queue_index;
     272  ready_queue = &self->Ready[ rqi ];
     273
     274  generation = self->generations[ generation_index ];
     275  node->generation = generation;
     276  self->generations[ generation_index ] = generation + increment;
     277
    148278  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
    149279  _RBTree_Insert_inline(
    150     &self->Ready,
     280    &ready_queue->Queue,
    151281    &node->Base.Base.Node.RBTree,
    152282    &node->Base.priority,
     283    less
     284  );
     285
     286  if ( rqi != 0 && _Chain_Is_node_off_chain( &ready_queue->Node ) ) {
     287    Scheduler_EDF_SMP_Node *scheduled;
     288
     289    scheduled = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
     290
     291    if ( scheduled->ready_queue_index == 0 ) {
     292      _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
     293    }
     294  }
     295}
     296
     297static inline void _Scheduler_EDF_SMP_Extract_from_ready(
     298  Scheduler_Context *context,
     299  Scheduler_Node    *node_to_extract
     300)
     301{
     302  Scheduler_EDF_SMP_Context     *self;
     303  Scheduler_EDF_SMP_Node        *node;
     304  uint32_t                       rqi;
     305  Scheduler_EDF_SMP_Ready_queue *ready_queue;
     306
     307  self = _Scheduler_EDF_SMP_Get_self( context );
     308  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
     309  rqi = node->ready_queue_index;
     310  ready_queue = &self->Ready[ rqi ];
     311
     312  _RBTree_Extract( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
     313  _Chain_Initialize_node( &node->Base.Base.Node.Chain );
     314
     315  if (
     316    rqi != 0
     317      && _RBTree_Is_empty( &ready_queue->Queue )
     318      && !_Chain_Is_node_off_chain( &ready_queue->Node )
     319  ) {
     320    _Chain_Extract_unprotected( &ready_queue->Node );
     321    _Chain_Set_off_chain( &ready_queue->Node );
     322  }
     323}
     324
     325static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
     326  Scheduler_Context *context,
     327  Scheduler_Node    *scheduled_to_ready
     328)
     329{
     330  _Chain_Extract_unprotected( &scheduled_to_ready->Node.Chain );
     331  _Scheduler_EDF_SMP_Insert_ready(
     332    context,
     333    scheduled_to_ready,
     334    0,
     335    1,
    153336    _Scheduler_EDF_SMP_Less
    154337  );
    155338}
    156339
    157 static void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
     340static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
    158341  Scheduler_Context *context,
    159342  Scheduler_Node    *ready_to_scheduled
    160343)
    161344{
    162   Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
    163   Scheduler_EDF_SMP_Node *node =
    164     _Scheduler_EDF_SMP_Node_downcast( ready_to_scheduled );
    165 
    166   _RBTree_Extract( &self->Ready, &node->Base.Base.Node.RBTree );
    167   _Chain_Initialize_node( &node->Base.Base.Node.Chain );
    168   _Chain_Insert_ordered_unprotected(
    169     &self->Base.Scheduled,
    170     &node->Base.Base.Node.Chain,
    171     _Scheduler_SMP_Insert_priority_fifo_order
    172   );
    173 }
    174 
    175 static void _Scheduler_EDF_SMP_Insert_ready_lifo(
     345  _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
     346  _Scheduler_SMP_Insert_scheduled_fifo( context, ready_to_scheduled );
     347}
     348
     349static inline void _Scheduler_EDF_SMP_Insert_ready_lifo(
    176350  Scheduler_Context *context,
    177351  Scheduler_Node    *node_to_insert
    178352)
    179353{
    180   Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
    181   Scheduler_EDF_SMP_Node *node =
    182     _Scheduler_EDF_SMP_Node_downcast( node_to_insert );
    183 
    184   _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
    185   _RBTree_Insert_inline(
    186     &self->Ready,
    187     &node->Base.Base.Node.RBTree,
    188     &node->Base.priority,
     354  _Scheduler_EDF_SMP_Insert_ready(
     355    context,
     356    node_to_insert,
     357    1,
     358    -1,
    189359    _Scheduler_EDF_SMP_Less_or_equal
    190360  );
    191361}
    192362
    193 static void _Scheduler_EDF_SMP_Insert_ready_fifo(
     363static inline void _Scheduler_EDF_SMP_Insert_ready_fifo(
    194364  Scheduler_Context *context,
    195365  Scheduler_Node    *node_to_insert
    196366)
    197367{
    198   Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
    199   Scheduler_EDF_SMP_Node *node =
    200     _Scheduler_EDF_SMP_Node_downcast( node_to_insert );
    201 
    202   _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
    203   _RBTree_Insert_inline(
    204     &self->Ready,
    205     &node->Base.Base.Node.RBTree,
    206     &node->Base.priority,
     368  _Scheduler_EDF_SMP_Insert_ready(
     369    context,
     370    node_to_insert,
     371    0,
     372    1,
    207373    _Scheduler_EDF_SMP_Less
    208374  );
    209375}
    210376
    211 static void _Scheduler_EDF_SMP_Extract_from_ready(
    212   Scheduler_Context *context,
    213   Scheduler_Node    *node_to_extract
    214 )
    215 {
    216   Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
    217   Scheduler_EDF_SMP_Node *node =
    218     _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
    219 
    220   _RBTree_Extract( &self->Ready, &node->Base.Base.Node.RBTree );
    221   _Chain_Initialize_node( &node->Base.Base.Node.Chain );
     377static inline void _Scheduler_EDF_SMP_Allocate_processor(
     378  Scheduler_Context *context,
     379  Scheduler_Node    *scheduled_base,
     380  Scheduler_Node    *victim_base,
     381  Per_CPU_Control   *victim_cpu
     382)
     383{
     384  Scheduler_EDF_SMP_Context     *self;
     385  Scheduler_EDF_SMP_Node        *scheduled;
     386  uint32_t                       rqi;
     387
     388  (void) victim_base;
     389  self = _Scheduler_EDF_SMP_Get_self( context );
     390  scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
     391  rqi = scheduled->ready_queue_index;
     392
     393  if ( rqi != 0 ) {
     394    Scheduler_EDF_SMP_Ready_queue *ready_queue;
     395    Per_CPU_Control               *desired_cpu;
     396
     397    ready_queue = &self->Ready[ rqi ];
     398
     399    if ( !_Chain_Is_node_off_chain( &ready_queue->Node ) ) {
     400      _Chain_Extract_unprotected( &ready_queue->Node );
     401      _Chain_Set_off_chain( &ready_queue->Node );
     402    }
     403
     404    desired_cpu = _Per_CPU_Get_by_index( rqi - 1 );
     405
     406    if ( victim_cpu != desired_cpu ) {
     407      Scheduler_EDF_SMP_Node *node;
     408
     409      node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
     410      _Assert( node->ready_queue_index == 0 );
     411      _Scheduler_EDF_SMP_Set_scheduled( self, node, victim_cpu );
     412      _Scheduler_SMP_Allocate_processor_exact(
     413        context,
     414        &node->Base.Base,
     415        NULL,
     416        victim_cpu
     417      );
     418      victim_cpu = desired_cpu;
     419    }
     420  }
     421
     422  _Scheduler_EDF_SMP_Set_scheduled( self, scheduled, victim_cpu );
     423  _Scheduler_SMP_Allocate_processor_exact(
     424    context,
     425    &scheduled->Base.Base,
     426    NULL,
     427    victim_cpu
     428  );
    222429}
    223430
     
    237444    _Scheduler_EDF_SMP_Get_highest_ready,
    238445    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    239     _Scheduler_SMP_Allocate_processor_lazy
    240   );
    241 }
    242 
    243 static bool _Scheduler_EDF_SMP_Enqueue_ordered(
     446    _Scheduler_EDF_SMP_Allocate_processor
     447  );
     448}
     449
     450static inline bool _Scheduler_EDF_SMP_Enqueue_ordered(
    244451  Scheduler_Context    *context,
    245452  Scheduler_Node       *node,
     
    256463    insert_scheduled,
    257464    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    258     _Scheduler_SMP_Get_lowest_scheduled,
    259     _Scheduler_SMP_Allocate_processor_lazy
    260   );
    261 }
    262 
    263 static bool _Scheduler_EDF_SMP_Enqueue_lifo(
     465    _Scheduler_EDF_SMP_Get_lowest_scheduled,
     466    _Scheduler_EDF_SMP_Allocate_processor
     467  );
     468}
     469
     470static inline bool _Scheduler_EDF_SMP_Enqueue_lifo(
    264471  Scheduler_Context *context,
    265472  Scheduler_Node    *node
     
    275482}
    276483
    277 static bool _Scheduler_EDF_SMP_Enqueue_fifo(
     484static inline bool _Scheduler_EDF_SMP_Enqueue_fifo(
    278485  Scheduler_Context *context,
    279486  Scheduler_Node    *node
     
    289496}
    290497
    291 static bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
     498static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
    292499  Scheduler_Context *context,
    293500  Scheduler_Node *node,
     
    306513    insert_scheduled,
    307514    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    308     _Scheduler_SMP_Allocate_processor_lazy
    309   );
    310 }
    311 
    312 static bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
     515    _Scheduler_EDF_SMP_Allocate_processor
     516  );
     517}
     518
     519static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
    313520  Scheduler_Context *context,
    314521  Scheduler_Node *node
     
    324531}
    325532
    326 static bool _Scheduler_EDF_SMP_Enqueue_scheduled_fifo(
     533static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_fifo(
    327534  Scheduler_Context *context,
    328535  Scheduler_Node *node
     
    355562}
    356563
    357 static bool _Scheduler_EDF_SMP_Do_ask_for_help(
     564static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
    358565  Scheduler_Context *context,
    359566  Thread_Control    *the_thread,
     
    369576    _Scheduler_SMP_Insert_scheduled_lifo,
    370577    _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
    371     _Scheduler_SMP_Get_lowest_scheduled,
    372     _Scheduler_SMP_Allocate_processor_lazy
     578    _Scheduler_EDF_SMP_Get_lowest_scheduled,
     579    _Scheduler_EDF_SMP_Allocate_processor
    373580  );
    374581}
     
    440647    _Scheduler_EDF_SMP_Get_highest_ready,
    441648    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
    442     _Scheduler_SMP_Allocate_processor_lazy
    443   );
     649    _Scheduler_EDF_SMP_Allocate_processor
     650  );
     651}
     652
     653static inline void _Scheduler_EDF_SMP_Register_idle(
     654  Scheduler_Context *context,
     655  Scheduler_Node    *idle_base,
     656  Per_CPU_Control   *cpu
     657)
     658{
     659  Scheduler_EDF_SMP_Context *self;
     660  Scheduler_EDF_SMP_Node    *idle;
     661
     662  self = _Scheduler_EDF_SMP_Get_self( context );
     663  idle = _Scheduler_EDF_SMP_Node_downcast( idle_base );
     664  _Scheduler_EDF_SMP_Set_scheduled( self, idle, cpu );
    444665}
    445666
     
    455676    idle,
    456677    _Scheduler_EDF_SMP_Has_ready,
    457     _Scheduler_EDF_SMP_Enqueue_scheduled_fifo
     678    _Scheduler_EDF_SMP_Enqueue_scheduled_fifo,
     679    _Scheduler_EDF_SMP_Register_idle
    458680  );
    459681}
     
    491713  );
    492714}
     715
     716static inline void _Scheduler_EDF_SMP_Do_set_affinity(
     717  Scheduler_Context *context,
     718  Scheduler_Node    *node_base,
     719  void              *arg
     720)
     721{
     722  Scheduler_EDF_SMP_Node *node;
     723  const uint32_t         *rqi;
     724
     725  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
     726  rqi = arg;
     727  node->ready_queue_index = *rqi;
     728}
     729
     730void _Scheduler_EDF_SMP_Start_idle(
     731  const Scheduler_Control *scheduler,
     732  Thread_Control          *idle,
     733  Per_CPU_Control         *cpu
     734)
     735{
     736  Scheduler_Context *context;
     737
     738  context = _Scheduler_Get_context( scheduler );
     739
     740  _Scheduler_SMP_Do_start_idle(
     741    context,
     742    idle,
     743    cpu,
     744    _Scheduler_EDF_SMP_Register_idle
     745  );
     746}
     747
      /*
       * Sets the processor affinity of a scheduler node for the EDF SMP
       * scheduler ("simple affinity support", update #3059).
       *
       * Returns false if the requested affinity does not intersect the set
       * of processors owned by this scheduler instance; true otherwise.
       *
       * Ready queue index selection visible in the code below:
       *  - if the affinity covers all online processors, rqi = 0, i.e. the
       *    node may run anywhere (the shared ready queue);
       *  - otherwise rqi is the last set bit of (scheduler processors AND
       *    affinity), i.e. the node is effectively bound to that single
       *    per-processor ready queue.  NOTE(review): affinities that name a
       *    strict subset of several processors thus collapse to one CPU --
       *    presumably intentional for "simple" affinity; confirm against the
       *    RTEMS EDF SMP documentation.
       */
      748bool _Scheduler_EDF_SMP_Set_affinity(
      749  const Scheduler_Control *scheduler,
      750  Thread_Control          *thread,
      751  Scheduler_Node          *node,
      752  const Processor_mask    *affinity
      753)
      754{
      755  Scheduler_Context *context;
      756  Processor_mask     a;
      757  Processor_mask     b;
      758  uint32_t           rqi;
      759
      760  context = _Scheduler_Get_context( scheduler );
      761  _Processor_mask_And( &a, &context->Processors, affinity );
      762
      763  /* Reject affinities disjoint from this scheduler's processor set. */
      764  if ( _Processor_mask_Count( &a ) == 0 ) {
      765    return false;
      766  }
      767
      768  _Processor_mask_And( &b, &_SMP_Online_processors, affinity );
      769
      770  if ( _Processor_mask_Count( &b ) == _SMP_Processor_count ) {
      771    rqi = 0;
      772  } else {
      773    rqi = _Processor_mask_Find_last_set( &a );
      774  }
      775
      776  /* Re-enqueue the node under the new ready queue index. */
      777  _Scheduler_SMP_Set_affinity(
      778    context,
      779    thread,
      780    node,
      781    &rqi,
      782    _Scheduler_EDF_SMP_Do_set_affinity,
      783    _Scheduler_EDF_SMP_Extract_from_ready,
      784    _Scheduler_EDF_SMP_Get_highest_ready,
      785    _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
      786    _Scheduler_EDF_SMP_Enqueue_fifo,
      787    _Scheduler_EDF_SMP_Allocate_processor
      788  );
      789
      790  return true;
      791}
  • cpukit/score/src/schedulerpriorityaffinitysmp.c

    rd19dc071 r34487537  
    580580    idle,
    581581    _Scheduler_priority_SMP_Has_ready,
    582     _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo
     582    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
     583    _Scheduler_SMP_Do_nothing_register_idle
    583584  );
    584585}
  • cpukit/score/src/schedulerprioritysmp.c

    rd19dc071 r34487537  
    324324    idle,
    325325    _Scheduler_priority_SMP_Has_ready,
    326     _Scheduler_priority_SMP_Enqueue_scheduled_fifo
     326    _Scheduler_priority_SMP_Enqueue_scheduled_fifo,
     327    _Scheduler_SMP_Do_nothing_register_idle
    327328  );
    328329}
  • cpukit/score/src/schedulersimplesmp.c

    rd19dc071 r34487537  
    399399    idle,
    400400    _Scheduler_simple_SMP_Has_ready,
    401     _Scheduler_simple_SMP_Enqueue_scheduled_fifo
     401    _Scheduler_simple_SMP_Enqueue_scheduled_fifo,
     402    _Scheduler_SMP_Do_nothing_register_idle
    402403  );
    403404}
  • cpukit/score/src/schedulersmpstartidle.c

    rd19dc071 r34487537  
    11/*
    2  * Copyright (c) 2013, 2016 embedded brains GmbH.
     2 * Copyright (c) 2013, 2017 embedded brains GmbH.
    33 *
    44 * The license and distribution terms for this file may be
     
    1919)
    2020{
    21   Scheduler_Context     *context;
    22   Scheduler_SMP_Context *self;
    23   Scheduler_SMP_Node    *node;
     21  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
    2422
    25   context = _Scheduler_Get_context( scheduler );
    26   self = _Scheduler_SMP_Get_self( context );
    27   node = _Scheduler_SMP_Thread_get_node( idle );
    28 
    29   _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
    30   node->state = SCHEDULER_SMP_NODE_SCHEDULED;
    31 
    32   _Thread_Set_CPU( idle, cpu );
    33   _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
    34   _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
     23  _Scheduler_SMP_Do_start_idle(
     24    context,
     25    idle,
     26    cpu,
     27    _Scheduler_SMP_Do_nothing_register_idle
     28  );
    3529}
  • cpukit/score/src/schedulerstrongapa.c

    rd19dc071 r34487537  
    458458    idle,
    459459    _Scheduler_strong_APA_Has_ready,
    460     _Scheduler_strong_APA_Enqueue_scheduled_fifo
     460    _Scheduler_strong_APA_Enqueue_scheduled_fifo,
     461    _Scheduler_SMP_Do_nothing_register_idle
    461462  );
    462463}
  • testsuites/smptests/Makefile.am

    rd19dc071 r34487537  
    3737_SUBDIRS += smpschedaffinity05
    3838_SUBDIRS += smpschededf01
     39_SUBDIRS += smpschededf02
     40_SUBDIRS += smpschededf03
    3941_SUBDIRS += smpschedsem01
    4042_SUBDIRS += smpscheduler01
  • testsuites/smptests/configure.ac

    rd19dc071 r34487537  
    9292smpschedaffinity05/Makefile
    9393smpschededf01/Makefile
     94smpschededf02/Makefile
     95smpschededf03/Makefile
    9496smpschedsem01/Makefile
    9597smpscheduler01/Makefile
  • testsuites/smptests/smpschededf01/init.c

    rd19dc071 r34487537  
    141141#define CONFIGURE_MAXIMUM_PERIODS 2
    142142
     143#define CONFIGURE_MAXIMUM_PROCESSORS 1
     144
    143145#define CONFIGURE_SCHEDULER_EDF_SMP
    144146
    145147#include <rtems/scheduler.h>
    146148
    147 RTEMS_SCHEDULER_CONTEXT_EDF_SMP(a);
     149RTEMS_SCHEDULER_CONTEXT_EDF_SMP(a, CONFIGURE_MAXIMUM_PROCESSORS);
    148150
    149151#define CONFIGURE_SCHEDULER_CONTROLS \
  • testsuites/smptests/smpscheduler07/init.c

    rd19dc071 r34487537  
    3333#include <rtems/scheduler.h>
    3434
    35 RTEMS_SCHEDULER_CONTEXT_EDF_SMP(a);
     35RTEMS_SCHEDULER_CONTEXT_EDF_SMP(a, CONFIGURE_MAXIMUM_PROCESSORS);
    3636
    3737#define CONFIGURE_SCHEDULER_CONTROLS \
Note: See TracChangeset for help on using the changeset viewer.