Changeset 5bd822a7 in rtems


Timestamp:
11/26/14 10:51:34
Author:
Sebastian Huber <sebastian.huber@…>
Branches:
4.11, 5, master
Children:
0ff1c29
Parents:
79569ae
git-author:
Sebastian Huber <sebastian.huber@…> (11/26/14 10:51:34)
git-committer:
Sebastian Huber <sebastian.huber@…> (11/27/14 09:33:31)
Message:

smp: Fix scheduler helping protocol

Ensure that scheduler nodes in the SCHEDULER_HELP_ACTIVE_OWNER or
SCHEDULER_HELP_ACTIVE_RIVAL helping state are always in the
SCHEDULER_SMP_NODE_READY or SCHEDULER_SMP_NODE_SCHEDULED state, so that
the MrsP protocol properties hold.
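
Restated: whenever a node's help_state is SCHEDULER_HELP_ACTIVE_OWNER or
SCHEDULER_HELP_ACTIVE_RIVAL, its SMP node state must be
SCHEDULER_SMP_NODE_READY or SCHEDULER_SMP_NODE_SCHEDULED, so that a processor
reserved via MrsP is never handed to unrelated tasks. A minimal sketch of that
invariant as a debug check, assuming the 4.11-era score types that appear in
the diffs below (the helper name itself is hypothetical):

    /* Hypothetical debug helper; _Assert, Scheduler_Node and
     * Scheduler_SMP_Node are the score types used in the diffs below.
     */
    static inline void _Sketch_Assert_helping_node_state(
      const Scheduler_Node     *node,
      const Scheduler_SMP_Node *smp_node
    )
    {
      if (
        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
      ) {
        /* A helping node must never be blocked, otherwise a processor
         * reserved by the MrsP protocol could run unrelated tasks.
         */
        _Assert(
          smp_node->state == SCHEDULER_SMP_NODE_READY
            || smp_node->state == SCHEDULER_SMP_NODE_SCHEDULED
        );
      }
    }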

Files:
3 edited

  • cpukit/score/include/rtems/score/schedulerimpl.h

r79569ae → r5bd822a7

@@ -950,7 +950,8 @@
  * @brief Use an idle thread for this scheduler node.
  *
- * A thread in the SCHEDULER_HELP_ACTIVE_OWNER owner state may use an idle
- * thread for the scheduler node owned by itself in case it executes currently
- * using another scheduler node or in case it is in a blocking state.
+ * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
+ * helping state may use an idle thread for the scheduler node owned by itself
+ * in case it executes currently using another scheduler node or in case it is
+ * in a blocking state.
  *
  * @param[in] context The scheduler instance context.
     
@@ -966,5 +967,8 @@
   Thread_Control *idle = ( *get_idle_thread )( context );

-  _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER );
+  _Assert(
+    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
+      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
+  );
   _Assert( _Scheduler_Node_get_idle( node ) == NULL );
   _Assert(
     
@@ -1010,4 +1014,6 @@
     if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
       _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
+    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
+      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
     } else {
       _Scheduler_Node_set_user( node, owner );
     
@@ -1073,4 +1079,7 @@
  *
  * @param[in] context The scheduler instance context.
+ * @param[in] thread The thread which wants to get blocked referencing this
+ *   node.  This is not necessarily the user of this node in case the node
+ *   participates in the scheduler helping protocol.
  * @param[in] node The node which wants to get blocked.
  * @param[in] is_scheduled This node is scheduled.
     
@@ -1088,21 +1097,44 @@
 )
 {
-  bool block;
-  Thread_Control *old_user = _Scheduler_Node_get_user( node );
-  Thread_Control *new_user = NULL;
-
-  _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_BLOCKED );
-
-  if ( is_scheduled ) {
-    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+  Thread_Control *old_user;
+  Thread_Control *new_user;
+
+  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
+
+  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
+    _Assert( thread == _Scheduler_Node_get_user( node ) );
+
+    return true;
+  }
+
+  new_user = NULL;
+
+  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+    if ( is_scheduled ) {
+      _Assert( thread == _Scheduler_Node_get_user( node ) );
+      old_user = thread;
       new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
-    } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
-      Thread_Control *owner = _Scheduler_Node_get_owner( node );
-
-      if ( thread == old_user && owner != old_user ) {
-        new_user = owner;
-        _Scheduler_Node_set_user( node, new_user );
+    }
+  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
+    if ( is_scheduled ) {
+      old_user = _Scheduler_Node_get_user( node );
+
+      if ( thread == old_user ) {
+        Thread_Control *owner = _Scheduler_Node_get_owner( node );
+
+        if (
+          thread != owner
+            && owner->Scheduler.state == THREAD_SCHEDULER_READY
+        ) {
+          new_user = owner;
+          _Scheduler_Node_set_user( node, new_user );
+        } else {
+          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+        }
       }
     }
+  } else {
+    /* Not implemented, this is part of the OMIP support path. */
+    _Assert(0);
   }

     
@@ -1113,11 +1145,7 @@
     _Thread_Set_CPU( new_user, cpu );
     _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
-
-    block = false;
-  } else {
-    block = true;
-  }
-
-  return block;
+  }
+
+  return false;
 }

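
The rewritten _Scheduler_Block_node() above returns true (really block) only
in the SCHEDULER_HELP_YOURSELF case; in the helping states it installs a
stand-in user on the node instead. A self-contained restatement of that
decision tree (simplified types; an illustrative sketch, not RTEMS code):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum {
      HELP_YOURSELF,
      HELP_ACTIVE_OWNER,
      HELP_ACTIVE_RIVAL
    } help_state;

    typedef enum { USER_NONE, USER_IDLE, USER_OWNER } new_user_kind;

    /* Returns true if the thread really blocks the node; otherwise
     * *new_user names the stand-in user installed on the node.
     */
    static bool block_node(
      help_state     state,
      bool           is_scheduled,
      bool           thread_is_user,
      bool           thread_is_owner,
      bool           owner_is_ready,
      new_user_kind *new_user
    )
    {
      *new_user = USER_NONE;

      if (state == HELP_YOURSELF) {
        return true;                  /* no helping: block normally */
      }

      if (state == HELP_ACTIVE_OWNER) {
        if (is_scheduled) {
          *new_user = USER_IDLE;      /* owner blocks: idle thread stands in */
        }
      } else if (state == HELP_ACTIVE_RIVAL) {
        if (is_scheduled && thread_is_user) {
          if (!thread_is_owner && owner_is_ready) {
            *new_user = USER_OWNER;   /* hand the node back to a ready owner */
          } else {
            *new_user = USER_IDLE;    /* otherwise park an idle thread */
          }
        }
      }

      return false;                   /* helping nodes stay READY/SCHEDULED */
    }

    int main(void)
    {
      new_user_kind nu;
      bool blocks = block_node(HELP_ACTIVE_RIVAL, true, true, false, true, &nu);

      /* Prints "blocks=0 new_user=2": the ready owner takes over the node */
      printf("blocks=%d new_user=%d\n", blocks, nu);
      return 0;
    }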
     
@@ -1147,24 +1175,36 @@
     Thread_Control *old_user = _Scheduler_Node_get_user( node );
     Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
+    Thread_Control *idle = _Scheduler_Release_idle_thread(
+      context,
+      node,
+      release_idle_thread
+    );
+    Thread_Control *owner = _Scheduler_Node_get_owner( node );
+    Thread_Control *new_user;

     if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
-      Thread_Control *idle = _Scheduler_Release_idle_thread(
-        context,
-        node,
-        release_idle_thread
-      );
-
       _Assert( idle != NULL );
-      (void) idle;
+      new_user = the_thread;
+    } else if ( idle != NULL ) {
+      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+      new_user = the_thread;
+    } else if ( the_thread != owner ) {
+      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+      _Assert( old_user != the_thread );
+      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
+      new_user = the_thread;
+      _Scheduler_Node_set_user( node, new_user );
     } else {
       _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
-
-      _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
-      _Scheduler_Node_set_user( node, the_thread );
-    }
-
-    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
-    _Thread_Set_CPU( the_thread, cpu );
-    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
+      _Assert( old_user != the_thread );
+      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
+      new_user = NULL;
+    }
+
+    if ( new_user != NULL ) {
+      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
+      _Thread_Set_CPU( new_user, cpu );
+      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
+    }

     unblock = false;
     
@@ -1244,5 +1284,8 @@
       new_user = needs_help;
     } else {
-      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+      _Assert(
+        node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
+          || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
+      );
       _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );

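
For context, the helping states patched above arise from MrsP semaphores
shared across scheduler instances, which the test update below exercises. A
minimal Classic API sketch of the owner side, assuming hypothetical names and
a ceiling priority of 2 (error handling reduced to asserts):

    #include <assert.h>
    #include <rtems.h>

    /* A task on another scheduler instance that blocks on the same
     * semaphore becomes an active rival: it spins on its own processor,
     * which the owner may borrow via the helping protocol.
     */
    static void mrsp_owner_example(void)
    {
      rtems_status_code sc;
      rtems_id sem_id;

      sc = rtems_semaphore_create(
        rtems_build_name('M', 'R', 'S', 'P'),
        1,
        RTEMS_MULTIPROCESSOR_RESOURCE_SHARING | RTEMS_BINARY_SEMAPHORE,
        2,
        &sem_id
      );
      assert(sc == RTEMS_SUCCESSFUL);

      sc = rtems_semaphore_obtain(sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
      assert(sc == RTEMS_SUCCESSFUL);

      /* Critical section: this task is the ACTIVE_OWNER of its node */

      sc = rtems_semaphore_release(sem_id);
      assert(sc == RTEMS_SUCCESSFUL);
    }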
  • testsuites/smptests/smpmrsp01/init.c

r79569ae → r5bd822a7

@@ -55,5 +55,6 @@
   rtems_id main_task_id;
   rtems_id migration_task_id;
-  rtems_id high_task_id;
+  rtems_id low_task_id[2];
+  rtems_id high_task_id[2];
   rtems_id timer_id;
   rtems_id counting_sem_id;
     
@@ -69,5 +70,6 @@
   size_t switch_index;
   switch_event switch_events[32];
-  volatile bool run;
+  volatile bool high_run[2];
+  volatile bool low_run[2];
 } test_context;

     
@@ -718,7 +720,4 @@
   sc = rtems_semaphore_delete(sem_c_id);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
-  change_prio(RTEMS_SELF, 2);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 }

     
@@ -754,6 +753,7 @@
   barrier(ctx, &barrier_state);

-  rtems_task_suspend(RTEMS_SELF);
-  rtems_test_assert(0);
+  while (true) {
+    /* Do nothing */
+  }
 }

     
@@ -764,14 +764,14 @@

   sc = rtems_task_start(
-    ctx->high_task_id,
+    ctx->high_task_id[0],
     run_task,
-    (rtems_task_argument) &ctx->run
-  );
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
-  sc = rtems_task_suspend(ctx->high_task_id);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
-  sc = rtems_task_resume(ctx->high_task_id);
+    (rtems_task_argument) &ctx->high_run[0]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->high_task_id[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_resume(ctx->high_task_id[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

     
@@ -784,5 +784,5 @@
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

-  sc = rtems_task_suspend(ctx->high_task_id);
+  sc = rtems_task_suspend(ctx->high_task_id[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 }
     
@@ -803,14 +803,35 @@
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

-  rtems_test_assert(!ctx->run);
-}
-
-static void unblock_ready_rival(test_context *ctx)
+  rtems_test_assert(!ctx->high_run[0]);
+}
+
+static void unblock_owner_before_rival_timer(rtems_id timer_id, void *arg)
+{
+  test_context *ctx = arg;
+  rtems_status_code sc;
+
+  sc = rtems_task_suspend(ctx->high_task_id[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->high_task_id[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void unblock_owner_after_rival_timer(rtems_id timer_id, void *arg)
+{
+  test_context *ctx = arg;
+  rtems_status_code sc;
+
+  sc = rtems_task_suspend(ctx->high_task_id[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->high_task_id[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void various_block_unblock(test_context *ctx)
 {
   rtems_status_code sc;
   SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
-
-  sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

   /* Worker obtain (F) */
     
@@ -823,11 +844,15 @@
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

-  sc = rtems_task_set_scheduler(ctx->high_task_id, ctx->scheduler_ids[1]);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
-  sc = rtems_task_resume(ctx->high_task_id);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
-  while (!ctx->run) {
+  sc = rtems_task_wake_after(2);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_start(
+    ctx->high_task_id[1],
+    run_task,
+    (rtems_task_argument) &ctx->high_run[1]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  while (!ctx->high_run[1]) {
     /* Do noting */
   }
     
@@ -836,5 +861,71 @@
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

-  sc = rtems_task_suspend(ctx->high_task_id);
+  /* Try to schedule a blocked active rival */
+
+  sc = rtems_task_suspend(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->high_task_id[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_resume(ctx->high_task_id[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_resume(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(rtems_get_current_processor() == 0);
+
+  /* Use node of the active rival */
+
+  sc = rtems_task_suspend(ctx->high_task_id[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_resume(ctx->high_task_id[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(rtems_get_current_processor() == 1);
+
+  sc = rtems_task_suspend(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_resume(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /*
+   * Try to schedule an active rival with an already scheduled active owner
+   * user.
+   */
+
+  sc = rtems_timer_fire_after(
+    ctx->timer_id,
+    2,
+    unblock_owner_before_rival_timer,
+    ctx
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /* This will take the processor away from us, the timer will help later */
+  sc = rtems_task_resume(ctx->high_task_id[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /*
+   * Try to schedule an active owner with an already scheduled active rival
+   * user.
+   */
+
+  sc = rtems_task_resume(ctx->high_task_id[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_timer_fire_after(
+    ctx->timer_id,
+    2,
+    unblock_owner_after_rival_timer,
+    ctx
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /* This will take the processor away from us, the timer will help later */
+  sc = rtems_task_resume(ctx->high_task_id[1]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

     
@@ -842,19 +933,51 @@
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

+  rtems_test_assert(rtems_get_current_processor() == 0);
+
   assert_prio(RTEMS_SELF, 4);

-  /* Worker done (F) */
+  /* Worker done (G) */
   barrier(ctx, &barrier_state);
 }

-static void test_mrsp_unblock_ready(test_context *ctx)
-{
-  rtems_status_code sc;
-
-  puts("test MrsP unblock ready");
-
-  ctx->run = false;
+static void start_low_task(test_context *ctx, size_t i)
+{
+  rtems_status_code sc;
+
+  sc = rtems_task_create(
+    rtems_build_name('L', 'O', 'W', '0' + i),
+    5,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &ctx->low_task_id[i]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_set_scheduler(ctx->low_task_id[i], ctx->scheduler_ids[i]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_start(
+    ctx->low_task_id[i],
+    run_task,
+    (rtems_task_argument) &ctx->low_run[i]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test_mrsp_various_block_and_unblock(test_context *ctx)
+{
+  rtems_status_code sc;
+
+  puts("test MrsP various block and unblock");

   change_prio(RTEMS_SELF, 4);
+
+  reset_switch_events(ctx);
+
+  ctx->low_run[0] = false;
+  ctx->low_run[1] = false;
+  ctx->high_run[0] = false;
+  ctx->high_run[1] = false;

   sc = rtems_semaphore_create(
     
@@ -871,11 +994,24 @@

   sc = rtems_task_create(
-    rtems_build_name('H', 'I', 'G', 'H'),
+    rtems_build_name('H', 'I', 'G', '0'),
     2,
     RTEMS_MINIMUM_STACK_SIZE,
     RTEMS_DEFAULT_MODES,
     RTEMS_DEFAULT_ATTRIBUTES,
-    &ctx->high_task_id
-  );
+    &ctx->high_task_id[0]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_create(
+    rtems_build_name('H', 'I', 'G', '1'),
+    2,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &ctx->high_task_id[1]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_set_scheduler(ctx->high_task_id[1], ctx->scheduler_ids[1]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

     
@@ -893,4 +1029,7 @@
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

+  sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
   sc = rtems_timer_create(
     rtems_build_name('T', 'I', 'M', 'R'),
     
@@ -899,20 +1038,35 @@
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

+  /* In case these tasks run, then we have a MrsP protocol violation */
+  start_low_task(ctx, 0);
+  start_low_task(ctx, 1);
+
   unblock_ready_owner(ctx);
-  unblock_ready_rival(ctx);
+  various_block_unblock(ctx);
+
+  rtems_test_assert(!ctx->low_run[0]);
+  rtems_test_assert(!ctx->low_run[1]);
+
+  print_switch_events(ctx);

   sc = rtems_timer_delete(ctx->timer_id);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

+  sc = rtems_task_delete(ctx->high_task_id[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(ctx->high_task_id[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
   sc = rtems_task_delete(ctx->worker_ids[0]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

-  sc = rtems_task_delete(ctx->high_task_id);
+  sc = rtems_task_delete(ctx->low_task_id[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(ctx->low_task_id[1]);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);

   sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
-  change_prio(RTEMS_SELF, 2);
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 }
     
@@ -1422,5 +1576,5 @@
   test_mrsp_deadlock_error(ctx);
   test_mrsp_multiple_obtain();
-  test_mrsp_unblock_ready(ctx);
+  test_mrsp_various_block_and_unblock(ctx);
   test_mrsp_obtain_and_sleep_and_release(ctx);
   test_mrsp_obtain_and_release_with_help(ctx);
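
The run_task body started repeatedly above is not shown in this changeset;
judging by how the high_run[]/low_run[] flags are asserted, it presumably
latches its flag argument and then busy-waits. A reconstructed sketch
(assumes <rtems.h> and <stdbool.h>; not the verbatim test code):

    /* Reconstructed sketch of run_task: signal that the task was
     * scheduled, then occupy the processor forever.
     */
    static void run_task(rtems_task_argument arg)
    {
      volatile bool *run = (volatile bool *) arg;

      *run = true;

      while (true) {
        /* Do nothing */
      }
    }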
  • testsuites/smptests/smpmrsp01/smpmrsp01.scn

r79569ae → r5bd822a7

@@ -6,5 +6,25 @@
 test MrsP deadlock error
 test MrsP multiple obtain
-test MrsP unblock ready
+test MrsP various block and unblock
+[1] IDLE -> WORK (prio   4, node WORK)
+[0] MAIN -> IDLE (prio   3, node MAIN)
+[0] IDLE -> MAIN (prio   3, node MAIN)
+[1] WORK -> IDLE (prio   3, node WORK)
+[1] IDLE -> HIG1 (prio   2, node HIG1)
+[1] HIG1 -> IDLE (prio   3, node WORK)
+[1] IDLE -> HIG1 (prio   2, node HIG1)
+[1] HIG1 -> WORK (prio   3, node WORK)
+[1] WORK -> MAIN (prio   3, node WORK)
+[0] MAIN -> HIG0 (prio   2, node HIG0)
+[1] MAIN -> HIG1 (prio   2, node HIG1)
+[1] HIG1 -> WORK (prio   3, node WORK)
+[0] HIG0 -> MAIN (prio   3, node MAIN)
+[1] WORK -> MAIN (prio   3, node WORK)
+[0] MAIN -> HIG0 (prio   2, node HIG0)
+[1] MAIN -> HIG1 (prio   2, node HIG1)
+[1] HIG1 -> MAIN (prio   3, node WORK)
+[0] HIG0 -> IDLE (prio   4, node MAIN)
+[1] MAIN -> WORK (prio   3, node WORK)
+[0] IDLE -> MAIN (prio   4, node MAIN)
 test MrsP obtain and sleep and release
 [0] MAIN ->  RUN (prio   2, node  RUN)