Changeset 6771359f in rtems


Timestamp:
Oct 27, 2016, 4:42:06 AM
Author:
Sebastian Huber <sebastian.huber@…>
Branches:
master
Children:
1cafc46
Parents:
0e754fac
git-author:
Sebastian Huber <sebastian.huber@…> (10/27/16 04:42:06)
git-committer:
Sebastian Huber <sebastian.huber@…> (11/02/16 09:05:44)
Message:

score: Second part of new MrsP implementation

Update #2556.
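The core of this part of the rework, visible throughout the hunks below, is the replacement of the per-node help-state machine (SCHEDULER_HELP_YOURSELF, ACTIVE_OWNER, ACTIVE_RIVAL, PASSIVE) by an integer sticky level. A minimal, hypothetical model of the rule (stand-in types only, not the RTEMS definitions):

    /* Toy model of the sticky level rule introduced here: a node with
     * sticky_level > 0 must keep a processor allocated, either for its
     * owner thread or for a borrowed idle thread.  Not the RTEMS code. */
    #include <assert.h>
    #include <stdbool.h>

    typedef struct {
      int  sticky_level;   /* > 0 while the owner holds/waits for a MrsP mutex */
      bool owner_blocked;
    } Node_Model;

    /* Obtaining a sticky resource increments the level ... */
    static void model_obtain( Node_Model *node ) { ++node->sticky_level; }

    /* ... and releasing it decrements the level; it may never go negative. */
    static void model_release( Node_Model *node )
    {
      --node->sticky_level;
      assert( node->sticky_level >= 0 );
    }

    /* An idle placeholder is needed exactly while the node is sticky and the
     * owner cannot execute on it itself. */
    static bool model_needs_idle( const Node_Model *node )
    {
      return node->sticky_level > 0 && node->owner_blocked;
    }

    int main( void )
    {
      Node_Model node = { 0, false };
      model_obtain( &node );       /* e.g. obtaining a MrsP semaphore */
      node.owner_blocked = true;   /* owner blocks while still sticky */
      assert( model_needs_idle( &node ) );
      node.owner_blocked = false;
      model_release( &node );
      assert( !model_needs_idle( &node ) );
      return 0;
    }

A counter composes naturally under nested resource obtains, which the old four-state enum could not express.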

Files:
7 edited

Legend:

    ' ' unmodified   '+' added   '-' removed
  • cpukit/score/include/rtems/score/schedulerimpl.h

    r0e754fac → r6771359f

       _Scheduler_Acquire_critical( scheduler, &lock_context );
    +
    +  scheduler_node->sticky_level += sticky_level_change;
    +  _Assert( scheduler_node->sticky_level >= 0 );

       ( *scheduler->Operations.update_priority )(

    …

     );

    -RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
    -  Thread_Control *the_thread,
    -  Scheduler_Node *node
    -)
    -{
    -  the_thread->Scheduler.node = node;
    -}
    -
    -RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
    -  Thread_Control       *the_thread,
    -  Scheduler_Node       *node,
    -  const Thread_Control *previous_user_of_node
    -)
    -{
    -  const Scheduler_Control *scheduler =
    -    _Scheduler_Get_own( previous_user_of_node );
    -
    -  the_thread->Scheduler.control = scheduler;
    -  _Scheduler_Thread_set_node( the_thread, node );
    -}
    -
     extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];

    …

     )
     {
    -  _Assert(
    -    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
    -      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
    -  );
       _Assert( _Scheduler_Node_get_idle( node ) == NULL );
       _Assert(
    …
     );

    -  _Scheduler_Thread_set_node( idle, node );
    -
       _Scheduler_Node_set_user( node, idle );
       node->idle = idle;

    …

      * @brief Use an idle thread for this scheduler node.
      *
    - * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
    - * helping state may use an idle thread for the scheduler node owned by itself
    - * in case it executes currently using another scheduler node or in case it is
    - * in a blocking state.
    + * A thread whose home scheduler node has a sticky level greater than zero may
    + * use an idle thread in the home scheduler instance in case it currently
    + * executes in another scheduler instance or in case it is in a blocking
    + * state.
      *
      * @param[in] context The scheduler instance context.
      * @param[in] node The node which wants to use the idle thread.
    + * @param[in] cpu The processor for the idle thread.
      * @param[in] get_idle_thread Function to get an idle thread.
      */

    …

       Scheduler_Context         *context,
       Scheduler_Node            *node,
    +  Per_CPU_Control           *cpu,
       Scheduler_Get_idle_thread  get_idle_thread
     )

    …

       _Scheduler_Set_idle_thread( node, idle );
    -
    +  _Thread_Set_CPU( idle, cpu );
       return idle;
     }
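As the updated documentation block states, the helper now also places the borrowed idle thread on a specific processor. A sketch of the resulting call shape with toy stand-in types (the real signatures live in schedulerimpl.h):

    #include <stddef.h>

    typedef struct Cpu     { int index; } Cpu;
    typedef struct Thread  { Cpu *cpu; } Thread;
    typedef struct Node    { Thread *user; Thread *idle; } Node;
    typedef struct Context { Thread *idle_pool; } Context;

    typedef Thread *( *Get_idle_thread )( Context * );

    static Thread *get_idle( Context *context ) { return context->idle_pool; }

    /* Simplified shape of the updated helper; not the RTEMS implementation. */
    static Thread *use_idle_thread(
      Context         *context,
      Node            *node,
      Cpu             *cpu,
      Get_idle_thread  get_idle_thread
    )
    {
      Thread *idle = ( *get_idle_thread )( context );

      node->user = idle;   /* _Scheduler_Node_set_user(): idle now uses the node */
      node->idle = idle;   /* _Scheduler_Set_idle_thread(): node claims the idle */
      idle->cpu  = cpu;    /* _Thread_Set_CPU(): the addition in this commit */
      return idle;
    }

    int main( void )
    {
      Cpu cpu0 = { 0 };
      Thread idle = { NULL };
      Node node = { NULL, NULL };
      Context context = { &idle };

      return use_idle_thread( &context, &node, &cpu0, get_idle )->cpu != &cpu0;
    }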
     
       ISR_lock_Context                  lock_context;
       Scheduler_Try_to_schedule_action  action;
    -  Thread_Control                   *owner;
       Thread_Control                   *user;

    …

       _Thread_Scheduler_acquire_critical( user, &lock_context );

    -  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    -    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
    -      _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
    -      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
    -    } else {
    -      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    -    }
    -
    -    _Thread_Scheduler_release_critical( user, &lock_context );
    -    return action;
    -  }
    -
    -  owner = _Scheduler_Node_get_owner( node );
    -
    -  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
    -    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
    -      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    -    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
    -      if ( idle != NULL ) {
    -        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    -      } else {
    -        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    -      }
    -    } else {
    -      _Scheduler_Node_set_user( node, owner );
    -    }
    -  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    -    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
    -      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    -    } else if ( idle != NULL ) {
    -      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    -    } else {
    -      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    -    }
    +  if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
    +    _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
    +    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
    +  } else if (
    +    user->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
    +      || node->sticky_level == 0
    +  ) {
    +    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    +  } else if ( idle != NULL ) {
    +    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
       } else {
    -    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
    -
    -    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
    -      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    -    } else {
    -      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    -    }
    -  }
    -
    -  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    -    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
    +    _Scheduler_Use_idle_thread(
    +      context,
    +      node,
    +      _Thread_Get_CPU( user ),
    +      get_idle_thread
    +    );
       }

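The rewritten body above collapses the old per-help-state branches into one chain keyed on the user's scheduler state and the sticky level. A compilable summary of that case analysis, with hypothetical stand-ins for the RTEMS enums:

    #include <assert.h>
    #include <stdbool.h>

    typedef enum { THREAD_READY, THREAD_SCHEDULED, THREAD_BLOCKED } Thread_state;

    typedef enum {
      DO_SCHEDULE,        /* the node stays/becomes scheduled */
      DO_BLOCK,           /* the node cannot be scheduled */
      DO_IDLE_EXCHANGE    /* reuse the idle thread offered by the caller */
    } Action;

    /* Mirrors the new if/else chain: READY users get scheduled directly; an
     * already SCHEDULED user or a zero sticky level blocks the node; otherwise
     * the node is kept alive through an idle thread.  Toy types, not RTEMS. */
    static Action try_to_schedule(
      Thread_state user_state,
      int          sticky_level,
      bool         has_idle,
      bool        *borrow_idle
    )
    {
      *borrow_idle = false;

      if ( user_state == THREAD_READY )
        return DO_SCHEDULE;
      if ( user_state == THREAD_SCHEDULED || sticky_level == 0 )
        return DO_BLOCK;
      if ( has_idle )
        return DO_IDLE_EXCHANGE;

      *borrow_idle = true;   /* _Scheduler_Use_idle_thread() in the real code */
      return DO_SCHEDULE;    /* the borrowed idle thread executes for the node */
    }

    int main( void )
    {
      bool borrow;

      /* A blocked user with a sticky node keeps the processor via an idle thread. */
      assert( try_to_schedule( THREAD_BLOCKED, 1, false, &borrow ) == DO_SCHEDULE );
      assert( borrow );

      /* Without stickiness the same situation simply blocks. */
      assert( try_to_schedule( THREAD_BLOCKED, 0, false, &borrow ) == DO_BLOCK );
      return 0;
    }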
     
    …

         node->idle = NULL;
         _Scheduler_Node_set_user( node, owner );
    -    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
    -    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );
    -
         ( *release_idle_thread )( context, idle );
       }

    …

     )
     {
    +  int               sticky_level;
       ISR_lock_Context  lock_context;
    -  Thread_Control   *old_user;
    -  Thread_Control   *new_user;
       Per_CPU_Control  *thread_cpu;
    +
    +  sticky_level = node->sticky_level;
    +  --sticky_level;
    +  node->sticky_level = sticky_level;
    +  _Assert( sticky_level >= 0 );

       _Thread_Scheduler_acquire_critical( thread, &lock_context );

    …

       _Thread_Scheduler_release_critical( thread, &lock_context );

    -  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    -    _Assert( thread == _Scheduler_Node_get_user( node ) );
    -
    -    return thread_cpu;
    -  }
    -
    -  new_user = NULL;
    -
    -  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    -    if ( is_scheduled ) {
    -      _Assert( thread == _Scheduler_Node_get_user( node ) );
    -      old_user = thread;
    -      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    +  if ( sticky_level > 0 ) {
    +    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
    +      Thread_Control *idle;
    +
    +      idle = _Scheduler_Use_idle_thread(
    +        context,
    +        node,
    +        thread_cpu,
    +        get_idle_thread
    +      );
    +      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
         }
    -  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    -    if ( is_scheduled ) {
    -      old_user = _Scheduler_Node_get_user( node );
    -
    -      if ( thread == old_user ) {
    -        Thread_Control *owner = _Scheduler_Node_get_owner( node );
    -
    -        if (
    -          thread != owner
    -            && owner->Scheduler.state == THREAD_SCHEDULER_READY
    -        ) {
    -          new_user = owner;
    -          _Scheduler_Node_set_user( node, new_user );
    -        } else {
    -          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    -        }
    -      }
    -    }
    -  } else {
    -    /* Not implemented, this is part of the OMIP support path. */
    -    _Assert(0);
    -  }
    -
    -  if ( new_user != NULL ) {
    -    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    -
    -    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    -    _Thread_Set_CPU( new_user, cpu );
    -    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
    -  }
    -
    -  return NULL;
    +
    +    return NULL;
    +  }
    +
    +  _Assert( thread == _Scheduler_Node_get_user( node ) );
    +  return thread_cpu;
    +}
    +
    +RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
    +  Scheduler_Context             *context,
    +  Thread_Control                *the_thread,
    +  Scheduler_Node                *node,
    +  Scheduler_Release_idle_thread  release_idle_thread
    +)
    +{
    +  Thread_Control  *idle;
    +  Thread_Control  *owner;
    +  Per_CPU_Control *cpu;
    +
    +  idle = _Scheduler_Node_get_idle( node );
    +  owner = _Scheduler_Node_get_owner( node );
    +
    +  node->idle = NULL;
    +  _Assert( _Scheduler_Node_get_user( node ) == idle );
    +  _Scheduler_Node_set_user( node, owner );
    +  ( *release_idle_thread )( context, idle );
    +
    +  cpu = _Thread_Get_CPU( idle );
    +  _Thread_Set_CPU( the_thread, cpu );
    +  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
     }

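The new _Scheduler_Discard_idle_thread() above is the inverse of _Scheduler_Use_idle_thread(): the idle placeholder is detached and the incoming thread takes over its processor and heir slot. A toy model of the hand-over order (not the RTEMS code):

    #include <assert.h>
    #include <stddef.h>

    typedef struct Thread { struct Cpu *cpu; } Thread;
    typedef struct Cpu    { Thread *heir; } Cpu;
    typedef struct Node   { Thread *idle; Thread *user; Thread *owner; } Node;

    /* Mirrors the order of operations: detach the idle placeholder, hand the
     * node back to its owner, then move the incoming thread onto the idle
     * thread's processor and make it the heir. */
    static void discard_idle_thread( Node *node, Thread *the_thread )
    {
      Thread *idle = node->idle;
      Cpu    *cpu  = idle->cpu;

      node->idle = NULL;
      assert( node->user == idle );
      node->user = node->owner;
      /* ( *release_idle_thread )( context, idle ) would run here */

      the_thread->cpu = cpu;
      cpu->heir = the_thread;  /* _Thread_Dispatch_update_heir() in the real code */
    }

    int main( void )
    {
      Cpu cpu = { NULL };
      Thread idle = { &cpu }, owner = { NULL };
      Node node = { &idle, &idle, &owner };

      discard_idle_thread( &node, &owner );
      assert( cpu.heir == &owner && node.idle == NULL );
      return 0;
    }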
     
    …

       bool unblock;
    +
    +  ++node->sticky_level;
    +  _Assert( node->sticky_level > 0 );

       if ( is_scheduled ) {
    -    Thread_Control *old_user = _Scheduler_Node_get_user( node );
    -    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    -    Thread_Control *idle = _Scheduler_Release_idle_thread(
    +    _Scheduler_Discard_idle_thread(
           context,
    +      the_thread,
           node,
           release_idle_thread
         );
    -    Thread_Control *owner = _Scheduler_Node_get_owner( node );
    -    Thread_Control *new_user;
    -
    -    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    -      _Assert( idle != NULL );
    -      new_user = the_thread;
    -    } else if ( idle != NULL ) {
    -      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
    -      new_user = the_thread;
    -    } else if ( the_thread != owner ) {
    -      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
    -      _Assert( old_user != the_thread );
    -      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
    -      new_user = the_thread;
    -      _Scheduler_Node_set_user( node, new_user );
    -    } else {
    -      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
    -      _Assert( old_user != the_thread );
    -      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    -      new_user = NULL;
    -    }
    -
    -    if ( new_user != NULL ) {
    -      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    -      _Thread_Set_CPU( new_user, cpu );
    -      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
    -    }
    -
    +    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
         unblock = false;
       } else {
         _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    -
         unblock = true;
       }
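Together, the block and unblock paths above keep the sticky level balanced: block decrements it and may install an idle thread, unblock increments it and discards the idle thread again if the node stayed scheduled. A simplified, self-contained model of that symmetry (not the RTEMS implementation):

    #include <assert.h>
    #include <stdbool.h>

    typedef struct { int sticky_level; bool has_idle; bool is_scheduled; } Node;

    static bool block( Node *node )          /* returns "give up processor?" */
    {
      --node->sticky_level;
      assert( node->sticky_level >= 0 );
      if ( node->sticky_level > 0 ) {
        if ( node->is_scheduled && !node->has_idle )
          node->has_idle = true;             /* _Scheduler_Use_idle_thread() */
        return false;                        /* keep the processor allocated */
      }
      return true;
    }

    static bool unblock( Node *node )        /* returns "needs scheduling?" */
    {
      ++node->sticky_level;
      assert( node->sticky_level > 0 );
      if ( node->is_scheduled ) {
        node->has_idle = false;              /* _Scheduler_Discard_idle_thread() */
        return false;                        /* already scheduled, just take over */
      }
      return true;
    }

    int main( void )
    {
      Node node = { 2, false, true };        /* sticky owner on a processor */
      assert( !block( &node ) && node.has_idle );
      assert( !unblock( &node ) && !node.has_idle );
      return 0;
    }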
     
    …

     #if defined(RTEMS_SMP)
    -  _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    -  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    -  _Chain_Initialize_one(
    -    &the_thread->Scheduler.Wait_nodes,
    -    &new_scheduler_node->Thread.Wait_node
    -  );
    -  _Chain_Extract_unprotected(
    -    &old_scheduler_node->Thread.Scheduler_node.Chain
    -  );
    -  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    -  _Chain_Initialize_one(
    -    &the_thread->Scheduler.Scheduler_nodes,
    -    &new_scheduler_node->Thread.Scheduler_node.Chain
    -  );
    -
       {
         const Scheduler_Control *old_scheduler;

    …

             _Scheduler_Block( the_thread );
           }
    +
    +      _Assert( old_scheduler_node->sticky_level == 0 );
    +      _Assert( new_scheduler_node->sticky_level == 0 );
    +
    +      _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    +      _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    +      _Chain_Initialize_one(
    +        &the_thread->Scheduler.Wait_nodes,
    +        &new_scheduler_node->Thread.Wait_node
    +      );
    +      _Chain_Extract_unprotected(
    +        &old_scheduler_node->Thread.Scheduler_node.Chain
    +      );
    +      _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    +      _Chain_Initialize_one(
    +        &the_thread->Scheduler.Scheduler_nodes,
    +        &new_scheduler_node->Thread.Scheduler_node.Chain
    +      );

           the_thread->Scheduler.own_control = new_scheduler;
  • cpukit/score/include/rtems/score/schedulernode.h

    r0e754fac → r6771359f

     #if defined(RTEMS_SMP)
     /**
    - * @brief State to indicate potential help for other threads.
    - *
    - * @dot
    - * digraph state {
    - *   y [label="HELP YOURSELF"];
    - *   ao [label="HELP ACTIVE OWNER"];
    - *   ar [label="HELP ACTIVE RIVAL"];
    - *
    - *   y -> ao [label="obtain"];
    - *   y -> ar [label="wait for obtain"];
    - *   ao -> y [label="last release"];
    - *   ao -> r [label="wait for obtain"];
    - *   ar -> r [label="timeout"];
    - *   ar -> ao [label="timeout"];
    - * }
    - * @enddot
    - */
    -typedef enum {
    -  /**
    -   * @brief This scheduler node is solely used by the owner thread.
    -   *
    -   * This thread owns no resources using a helping protocol and thus does not
    -   * take part in the scheduler helping protocol.  No help will be provided for
    -   * other thread.
    -   */
    -  SCHEDULER_HELP_YOURSELF,
    -
    -  /**
    -   * @brief This scheduler node is owned by a thread actively owning a resource.
    -   *
    -   * This scheduler node can be used to help out threads.
    -   *
    -   * In case this scheduler node changes its state from ready to scheduled and
    -   * the thread executes using another node, then an idle thread will be
    -   * provided as a user of this node to temporarily execute on behalf of the
    -   * owner thread.  Thus lower priority threads are denied access to the
    -   * processors of this scheduler instance.
    -   *
    -   * In case a thread actively owning a resource performs a blocking operation,
    -   * then an idle thread will be used also in case this node is in the
    -   * scheduled state.
    -   */
    -  SCHEDULER_HELP_ACTIVE_OWNER,
    -
    -  /**
    -   * @brief This scheduler node is owned by a thread actively obtaining a
    -   * resource currently owned by another thread.
    -   *
    -   * This scheduler node can be used to help out threads.
    -   *
    -   * The thread owning this node is ready and will give away its processor in
    -   * case the thread owning the resource asks for help.
    -   */
    -  SCHEDULER_HELP_ACTIVE_RIVAL,
    -
    -  /**
    -   * @brief This scheduler node is owned by a thread obtaining a
    -   * resource currently owned by another thread.
    -   *
    -   * This scheduler node can be used to help out threads.
    -   *
    -   * The thread owning this node is blocked.
    -   */
    -  SCHEDULER_HELP_PASSIVE
    -} Scheduler_Help_state;
    -#endif
    -
    -#if defined(RTEMS_SMP)
    -/**
      * @brief The scheduler node requests.
      */

    …

       /**
    +   * @brief The sticky level determines if this scheduler node should use an
    +   * idle thread in case this node is scheduled and the owner thread is
    +   * blocked.
    +   */
    +  int sticky_level;
    +
    +  /**
        * @brief The thread using this node.
    +   *
    +   * This is either the owner or an idle thread.
        */
       struct _Thread_Control *user;

       /**
    -   * @brief The help state of this node.
    -   */
    -  Scheduler_Help_state help_state;
    -
    -  /**
    -   * @brief The idle thread claimed by this node in case the help state is
    -   * SCHEDULER_HELP_ACTIVE_OWNER.
    -   *
    -   * Active owners will lend their own node to an idle thread in case they
    -   * execute currently using another node or in case they perform a blocking
    -   * operation.  This is necessary to ensure the priority ceiling protocols
    -   * work across scheduler boundaries.
    +   * @brief The idle thread claimed by this node in case the sticky level is
    +   * greater than zero and the thread is blocked or is scheduled on another
    +   * scheduler instance.
    +   *
    +   * This is necessary to ensure the priority ceiling protocols work across
    +   * scheduler boundaries.
        */
       struct _Thread_Control *idle;
    -
    +#endif
    +
    +  /**
    +   * @brief The thread owning this node.
    +   */
    +  struct _Thread_Control *owner;
    +
    +#if defined(RTEMS_SMP)
       /**
        * @brief The thread accepting help by this node in case the help state is

    …

       /**
    -   * @brief The thread owning this node.
    -   */
    -  struct _Thread_Control *owner;
    -
    -  /**
        * @brief The thread priority information used by the scheduler.
        *
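After these edits the node layout is roughly as follows; an abridged, hypothetical rendering (see schedulernode.h for the real definition):

    #define RTEMS_SMP 1   /* assumption for this sketch */

    struct _Thread_Control;

    struct Scheduler_Node_sketch {
    #if defined(RTEMS_SMP)
      int sticky_level;               /* replaces Scheduler_Help_state help_state */
      struct _Thread_Control *user;   /* the owner or a borrowed idle thread */
      struct _Thread_Control *idle;   /* claimed idle thread, NULL if none */
    #endif
      struct _Thread_Control *owner;  /* moved out of the SMP-only section */
      /* ... help acceptance, priority information, chain anchors elided ... */
    };

    int main( void ) { return 0; }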
  • cpukit/score/include/rtems/score/schedulernodeimpl.h

    r0e754fac → r6771359f

       node->Wait.Priority.scheduler = scheduler;
       node->user = the_thread;
    -  node->help_state = SCHEDULER_HELP_YOURSELF;
       node->idle = NULL;
       node->accepts_help = the_thread;
  • cpukit/score/include/rtems/score/schedulersmpimpl.h

    r0e754fac → r6771359f

         * it now on the scheduled or ready set.
         */
    -    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
    +    if (
    +      node->sticky_level > 0
    +        && ( *order )( &node->Node, &highest_ready->Node )
    +    ) {
           ( *insert_scheduled )( context, node );
    +
    +      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
    +        Thread_Control   *owner;
    +        ISR_lock_Context  lock_context;
    +
    +        owner = _Scheduler_Node_get_owner( node );
    +        _Thread_Scheduler_acquire_critical( owner, &lock_context );
    +
    +        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    +          _Thread_Scheduler_cancel_need_for_help(
    +            owner,
    +            _Thread_Get_CPU( owner )
    +          );
    +          _Scheduler_Discard_idle_thread(
    +            context,
    +            owner,
    +            node,
    +            _Scheduler_SMP_Release_idle_thread
    +          );
    +          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
    +        }
    +
    +        _Thread_Scheduler_release_critical( owner, &lock_context );
    +      }
    +
           return NULL;
         }
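In the hunk above, a sticky node that still wins the priority order against the highest-ready node stays in the scheduled set, and the borrowed idle thread is handed back if the owner became ready in the meantime. A toy model of the condition (not the RTEMS code):

    #include <assert.h>
    #include <stdbool.h>

    typedef struct {
      int  sticky_level;
      bool beats_highest_ready;  /* ( *order )( &node->Node, &highest_ready->Node ) */
      bool has_idle;
      bool owner_ready;
    } Node;

    /* Returns true if the node keeps its processor. */
    static bool keep_scheduled( Node *node )
    {
      if ( node->sticky_level > 0 && node->beats_highest_ready ) {
        if ( node->has_idle && node->owner_ready )
          node->has_idle = false;  /* _Scheduler_Discard_idle_thread() */
        return true;               /* stays in the scheduled set */
      }
      return false;                /* processor goes to the highest-ready node */
    }

    int main( void )
    {
      Node node = { 1, true, true, true };
      assert( keep_scheduled( &node ) && !node.has_idle );
      return 0;
    }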
     
         } else {
           _Assert( node_state == SCHEDULER_SMP_NODE_READY );
    -      _Assert(
    -        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
    -          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
    -      );
    +      _Assert( node->sticky_level > 0 );
           _Assert( node->idle == NULL );

    …

       _Thread_Scheduler_acquire_critical( thread, &lock_context );

    -  if (
    -    thread->Scheduler.state == THREAD_SCHEDULER_READY
    -      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_BLOCKED
    -  ) {
    -    if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
    +  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    +    Scheduler_SMP_Node_state node_state;
    +
    +    node_state = _Scheduler_SMP_Node_state( node );
    +
    +    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
    +      if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
    +        _Thread_Scheduler_cancel_need_for_help(
    +          thread,
    +          _Thread_Get_CPU( thread )
    +        );
    +        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
    +        _Thread_Scheduler_release_critical( thread, &lock_context );
    +
    +        _Scheduler_SMP_Preempt(
    +          context,
    +          node,
    +          lowest_scheduled,
    +          allocate_processor
    +        );
    +
    +        ( *insert_scheduled )( context, node );
    +        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
    +
    +        _Scheduler_Release_idle_thread(
    +          context,
    +          lowest_scheduled,
    +          _Scheduler_SMP_Release_idle_thread
    +        );
    +        success = true;
    +      } else {
    +        _Thread_Scheduler_release_critical( thread, &lock_context );
    +        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    +        ( *insert_ready )( context, node );
    +        success = false;
    +      }
    +    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
           _Thread_Scheduler_cancel_need_for_help(
             thread,
             _Thread_Get_CPU( thread )
           );
    +      _Scheduler_Discard_idle_thread(
    +        context,
    +        thread,
    +        node,
    +        _Scheduler_SMP_Release_idle_thread
    +      );
           _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
           _Thread_Scheduler_release_critical( thread, &lock_context );
    -
    -      _Scheduler_SMP_Preempt(
    -        context,
    -        node,
    -        lowest_scheduled,
    -        allocate_processor
    -      );
    -
    -      ( *insert_scheduled )( context, node );
    -      ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
    -
    -      _Scheduler_Release_idle_thread(
    -        context,
    -        lowest_scheduled,
    -        _Scheduler_SMP_Release_idle_thread
    -      );
           success = true;
         } else {
           _Thread_Scheduler_release_critical( thread, &lock_context );
    -      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    -      ( *insert_ready )( context, node );
           success = false;
         }

    …

         thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
           && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
    +      && node->sticky_level == 1
       ) {
         _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
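The restructured help path above branches on the node state explicitly, and the final hunk additionally requires node->sticky_level == 1 before the node is taken back to the blocked state. A compilable summary of the help-request outcomes, with hypothetical stand-ins for the RTEMS enums:

    #include <assert.h>
    #include <stdbool.h>

    typedef enum { NODE_BLOCKED, NODE_READY, NODE_SCHEDULED } Node_state;

    /* Applies only while the thread is THREAD_SCHEDULER_READY: a BLOCKED node
     * can help by preempting the lowest scheduled node, a SCHEDULED node is
     * simply reclaimed from its idle thread, anything else fails for now. */
    static bool ask_for_help( Node_state node_state, bool beats_lowest_scheduled )
    {
      if ( node_state == NODE_BLOCKED )
        return beats_lowest_scheduled;  /* preempt, else insert into ready set */
      if ( node_state == NODE_SCHEDULED )
        return true;                    /* _Scheduler_Discard_idle_thread() */
      return false;
    }

    int main( void )
    {
      assert( ask_for_help( NODE_SCHEDULED, false ) );
      assert( !ask_for_help( NODE_READY, true ) );
      return 0;
    }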
  • cpukit/score/include/rtems/score/threadimpl.h

    r0e754fac → r6771359f

      */
     void _Thread_Priority_update( Thread_queue_Context *queue_context );
    +
    +#if defined(RTEMS_SMP)
    +void _Thread_Priority_and_sticky_update(
    +  Thread_Control *the_thread,
    +  int             sticky_level_change
    +);
    +#endif

     /**
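This declaration ties the headers together: the first schedulerimpl.h hunk above applies sticky_level_change under the scheduler lock immediately before the priority update. The callers are not part of the hunks shown here; a hedged guess at obtain/release call sites (assumptions, not from this diff, and compilable only inside the RTEMS source tree):

    /* Hypothetical illustration only: a MrsP-style obtain would raise the
     * priority and bump the sticky level in one protected operation, and the
     * release path would undo both. */
    #include <rtems/score/threadimpl.h>

    #if defined(RTEMS_SMP)
    void obtain_like_path( Thread_Control *executing )
    {
      /* ...priority already raised to the ceiling... */
      _Thread_Priority_and_sticky_update( executing, 1 );
    }

    void release_like_path( Thread_Control *executing )
    {
      /* ...previous priority already restored... */
      _Thread_Priority_and_sticky_update( executing, -1 );
    }
    #endif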
  • cpukit/score/src/schedulerpriorityaffinitysmp.c

    r0e754fac → r6771359f

        /*
    -     * FIXME: Do not consider threads using the scheduler helping protocol
    -     * since this could produce more than one thread in need for help in one
    -     * operation which is currently not possible.
    -     */
    -    if ( lowest_scheduled->help_state != SCHEDULER_HELP_YOURSELF )
    -      break;
    -
    -    /*
         * But if we found a thread which is lower priority than one
         * in the ready set, then we need to swap them out.
  • testsuites/smptests/smpmrsp01/init.c

    r0e754fac → r6771359f

       change_prio(run_task_id, 4);
    -
    -  rtems_test_assert(rtems_get_current_processor() == 1);
    -
    -  sc = rtems_task_wake_after(2);
    -  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

       rtems_test_assert(rtems_get_current_processor() == 1);