Changeset cceb19f4 in rtems


Ignore:
Timestamp:
Nov 21, 2014, 10:01:34 AM (5 years ago)
Author:
Luca Bonato <lohathe@…>
Branches:
4.11, master
Children:
26f4cdd
Parents:
6570876
git-author:
Luca Bonato <lohathe@…> (11/21/14 10:01:34)
git-committer:
Sebastian Huber <sebastian.huber@…> (11/24/14 07:01:15)
Message:

smp: Fix scheduler helping protocol

New test case for smptests/smpmrsp01.

Fix _Scheduler_Block_node() in case the node is in the
SCHEDULER_HELP_ACTIVE_RIVAL helping state. For example a
rtems_task_suspend() on a task waiting for a MrsP semaphore.

Fix _Scheduler_Unblock_node() in case the node is in the
SCHEDULER_SMP_NODE_READY state. For example a rtems_task_resume() on a
task owning or waiting for a MrsP semaphore.

Files:
4 edited

Legend:

Unmodified
Added
Removed
  • cpukit/score/include/rtems/score/schedulerimpl.h

    r6570876 rcceb19f4  
    10821082RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
    10831083  Scheduler_Context         *context,
     1084  Thread_Control            *thread,
    10841085  Scheduler_Node            *node,
    10851086  bool                       is_scheduled,
     
    10891090  bool block;
    10901091  Thread_Control *old_user = _Scheduler_Node_get_user( node );
    1091   Thread_Control *new_user;
     1092  Thread_Control *new_user = NULL;
    10921093
    10931094  _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_BLOCKED );
    10941095
    1095   if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    1096     new_user = _Scheduler_Node_get_owner( node );
    1097 
    1098     _Assert( new_user != old_user );
    1099     _Scheduler_Node_set_user( node, new_user );
    1100   } else if (
    1101     node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
    1102       && is_scheduled
    1103   ) {
    1104     new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    1105   } else {
    1106     new_user = NULL;
    1107   }
    1108 
    1109   if ( new_user != NULL && is_scheduled ) {
     1096  if ( is_scheduled ) {
     1097    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
     1098      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
     1099    } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
     1100      Thread_Control *owner = _Scheduler_Node_get_owner( node );
     1101
     1102      if ( thread == old_user && owner != old_user ) {
     1103        new_user = owner;
     1104        _Scheduler_Node_set_user( node, new_user );
     1105      }
     1106    }
     1107  }
     1108
     1109  if ( new_user != NULL ) {
    11101110    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
    11111111
  • cpukit/score/include/rtems/score/schedulersmpimpl.h

    r6570876 rcceb19f4  
    794794  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
    795795  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
    796   bool block = _Scheduler_Block_node(
     796  bool block;
     797
     798  _Assert( is_scheduled || node->state == SCHEDULER_SMP_NODE_READY );
     799
     800  block = _Scheduler_Block_node(
    797801    context,
     802    thread,
    798803    &node->Base,
    799804    is_scheduled,
    800805    _Scheduler_SMP_Get_idle_thread
    801806  );
    802 
    803807  if ( block ) {
    804808    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
     
    839843
    840844  if ( unblock ) {
    841     _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    842 
    843     needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
     845    if ( node->state != SCHEDULER_SMP_NODE_READY ) {
     846      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
     847
     848      needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
     849    } else {
     850      _Assert( node->state == SCHEDULER_SMP_NODE_READY );
     851      _Assert( node->Base.idle == NULL );
     852
     853      if ( node->Base.accepts_help == thread ) {
     854        _Assert( node->Base.help_state == SCHEDULER_HELP_ACTIVE_OWNER );
     855        needs_help = thread;
     856      } else {
     857        _Assert( node->Base.help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
     858        needs_help = NULL;
     859      }
     860    }
    844861  } else {
    845862    needs_help = NULL;
  • testsuites/smptests/smpmrsp01/init.c

    r6570876 rcceb19f4  
    5555  rtems_id main_task_id;
    5656  rtems_id migration_task_id;
     57  rtems_id high_task_id;
     58  rtems_id timer_id;
    5759  rtems_id counting_sem_id;
    5860  rtems_id mrsp_ids[MRSP_COUNT];
     
    6769  size_t switch_index;
    6870  switch_event switch_events[32];
     71  volatile bool run;
    6972} test_context;
    7073
     
    727730    *run = true;
    728731  }
     732}
     733
     734static void ready_unlock_worker(rtems_task_argument arg)
     735{
     736  test_context *ctx = &test_instance;
     737  rtems_status_code sc;
     738  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
     739
     740  assert_prio(RTEMS_SELF, 4);
     741
     742  /* Obtain (F) */
     743  barrier(ctx, &barrier_state);
     744
     745  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
     746  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     747
     748  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
     749  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     750
     751  assert_prio(RTEMS_SELF, 4);
     752
     753  /* Done (G) */
     754  barrier(ctx, &barrier_state);
     755
     756  rtems_task_suspend(RTEMS_SELF);
     757  rtems_test_assert(0);
     758}
     759
/*
 * Timer service routine: starts and toggles the high priority task so that
 * the main task is preempted, then wakes the main task back up while its
 * scheduler node is still in the ready state.
 *
 * The timer_id parameter is unused; the test context arrives via arg.
 * NOTE(review): this runs from the clock tick / timer server context, so it
 * executes concurrently with the blocked main task — confirm against the
 * timer configuration used by this test.
 */
static void unblock_ready_timer(rtems_id timer_id, void *arg)
{
  test_context *ctx = arg;
  rtems_status_code sc;

  /* Start the high priority task; it will preempt the main task. */
  sc = rtems_task_start(
    ctx->high_task_id,
    run_task,
    (rtems_task_argument) &ctx->run
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Suspend and resume the high priority task to provoke the state
   * transition under test on the main task's scheduler node. */
  sc = rtems_task_suspend(ctx->high_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->high_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /*
   * At this point the scheduler node of the main thread is in the
   * SCHEDULER_SMP_NODE_READY state and a _Scheduler_SMP_Unblock() operation is
   * performed.
   */
  sc = rtems_event_transient_send(ctx->main_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Leave the high priority task suspended for the caller to resume later. */
  sc = rtems_task_suspend(ctx->high_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
     789
/*
 * Exercises the unblock of a task that OWNS a MrsP semaphore while its
 * scheduler node is ready: obtains the semaphore (raising this task to the
 * ceiling priority 3), arms a timer that preempts and then unblocks this
 * task, and blocks waiting for the transient event the timer sends.
 *
 * Note: the semaphore is deliberately NOT released here — this task returns
 * still owning ctx->mrsp_ids[0]; the release happens in a later test step
 * (see unblock_ready_rival()).
 */
static void unblock_ready_owner(test_context *ctx)
{
  rtems_status_code sc;

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Owning the semaphore raises this task to the ceiling priority. */
  assert_prio(RTEMS_SELF, 3);

  sc = rtems_timer_fire_after(ctx->timer_id, 2, unblock_ready_timer, ctx);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Block until unblock_ready_timer() sends the transient event. */
  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* The high priority task was left suspended by the timer routine, so it
   * must not have run yet. */
  rtems_test_assert(!ctx->run);
}
     807
/*
 * Exercises the unblock of a task WAITING for a MrsP semaphore while its
 * scheduler node is ready: the worker blocks on the semaphore (still owned
 * by this task from unblock_ready_owner()), is suspended and later resumed,
 * then the semaphore is released so the worker can complete.
 */
static void unblock_ready_rival(test_context *ctx)
{
  rtems_status_code sc;
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Worker obtain (F) */
  barrier(ctx, &barrier_state);

  /* Give the worker time to block on the semaphore owned by this task. */
  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Suspend the waiting worker — its node is in the rival helping state. */
  sc = rtems_task_suspend(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_scheduler(ctx->high_task_id, ctx->scheduler_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->high_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Busy-wait until the high priority task has actually run. */
  while (!ctx->run) {
    /* Do nothing */
  }

  /* Resume the worker while its scheduler node is ready. */
  sc = rtems_task_resume(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_suspend(ctx->high_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Release the semaphore obtained in unblock_ready_owner(). */
  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Back to the base priority after the release. */
  assert_prio(RTEMS_SELF, 4);

  /* Worker done (G) */
  barrier(ctx, &barrier_state);
}
     849
     850static void test_mrsp_unblock_ready(test_context *ctx)
     851{
     852  rtems_status_code sc;
     853
     854  puts("test MrsP unblock ready");
     855
     856  ctx->run = false;
     857
     858  change_prio(RTEMS_SELF, 4);
     859
     860  sc = rtems_semaphore_create(
     861    rtems_build_name(' ', ' ', ' ', 'A'),
     862    1,
     863    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
     864      | RTEMS_BINARY_SEMAPHORE,
     865    3,
     866    &ctx->mrsp_ids[0]
     867  );
     868  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     869
     870  assert_prio(RTEMS_SELF, 4);
     871
     872  sc = rtems_task_create(
     873    rtems_build_name('H', 'I', 'G', 'H'),
     874    2,
     875    RTEMS_MINIMUM_STACK_SIZE,
     876    RTEMS_DEFAULT_MODES,
     877    RTEMS_DEFAULT_ATTRIBUTES,
     878    &ctx->high_task_id
     879  );
     880  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     881
     882  sc = rtems_task_create(
     883    rtems_build_name('W', 'O', 'R', 'K'),
     884    4,
     885    RTEMS_MINIMUM_STACK_SIZE,
     886    RTEMS_DEFAULT_MODES,
     887    RTEMS_DEFAULT_ATTRIBUTES,
     888    &ctx->worker_ids[0]
     889  );
     890  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     891
     892  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
     893  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     894
     895  sc = rtems_timer_create(
     896    rtems_build_name('T', 'I', 'M', 'R'),
     897    &ctx->timer_id
     898  );
     899  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     900
     901  unblock_ready_owner(ctx);
     902  unblock_ready_rival(ctx);
     903
     904  sc = rtems_timer_delete(ctx->timer_id);
     905  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     906
     907  sc = rtems_task_delete(ctx->worker_ids[0]);
     908  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     909
     910  sc = rtems_task_delete(ctx->high_task_id);
     911  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     912
     913  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
     914  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     915
     916  change_prio(RTEMS_SELF, 2);
     917  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    729918}
    730919
     
    12331422  test_mrsp_deadlock_error(ctx);
    12341423  test_mrsp_multiple_obtain();
     1424  test_mrsp_unblock_ready(ctx);
    12351425  test_mrsp_obtain_and_sleep_and_release(ctx);
    12361426  test_mrsp_obtain_and_release_with_help(ctx);
  • testsuites/smptests/smpmrsp01/smpmrsp01.scn

    r6570876 rcceb19f4  
    66test MrsP deadlock error
    77test MrsP multiple obtain
     8test MrsP unblock ready
    89test MrsP obtain and sleep and release
    910[0] MAIN ->  RUN (prio   2, node  RUN)
Note: See TracChangeset for help on using the changeset viewer.