Changeset 441e8ef in rtems


Ignore:
Timestamp:
May 14, 2015, 1:41:00 PM (4 years ago)
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.11, master
Children:
e893061
Parents:
b7f3ee9 (diff), a0001d6 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of ssh://dispatch.rtems.org/data/git/rtems

Files:
8 edited

Legend:

Unmodified
Added
Removed
  • c/src/lib/libbsp/sparc/shared/timer/tlib_ckinit.c

    rb7f3ee9 r441e8ef  
    5252void Clock_exit(void);
    5353void Clock_isr(void *arg_unused);
    54 
    55 /*
    56  *  Major and minor number.
    57  */
    58 
    59 rtems_device_major_number rtems_clock_major = UINT32_MAX;
    60 rtems_device_minor_number rtems_clock_minor;
    6154
    6255/*
     
    260253
    261254  /*
    262    * make major/minor avail to others such as shared memory driver
    263    */
    264 
    265   rtems_clock_major = major;
    266   rtems_clock_minor = minor;
    267 
    268   /*
    269255   *  If we are counting ISRs per tick, then initialize the counter.
    270256   */
  • cpukit/score/include/rtems/score/mrspimpl.h

    rb7f3ee9 r441e8ef  
    11/*
    2  * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
     2 * Copyright (c) 2014-2015 embedded brains GmbH.  All rights reserved.
    33 *
    44 *  embedded brains GmbH
     
    231231{
    232232  MRSP_Status status;
    233   const Scheduler_Control *scheduler = _Scheduler_Get( executing );
     233  const Scheduler_Control *scheduler = _Scheduler_Get_own( executing );
    234234  uint32_t scheduler_index = _Scheduler_Get_index( scheduler );
    235235  Priority_Control initial_priority = executing->current_priority;
  • cpukit/score/include/rtems/score/schedulerimpl.h

    rb7f3ee9 r441e8ef  
    1111 *  Copyright (C) 2010 Gedare Bloom.
    1212 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
    13  *  Copyright (c) 2014 embedded brains GmbH
     13 *  Copyright (c) 2014-2015 embedded brains GmbH
    1414 *
    1515 *  The license and distribution terms for this file may be
     
    951951);
    952952
    953 /**
    954  * @brief Use an idle thread for this scheduler node.
    955  *
    956  * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
    957  * helping state may use an idle thread for the scheduler node owned by itself
    958  * in case it executes currently using another scheduler node or in case it is
    959  * in a blocking state.
    960  *
    961  * @param[in] context The scheduler instance context.
    962  * @param[in] node The node which wants to use the idle thread.
    963  * @param[in] get_idle_thread Function to get an idle thread.
    964  */
    965 RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
    966   Scheduler_Context         *context,
    967   Scheduler_Node            *node,
    968   Scheduler_Get_idle_thread  get_idle_thread
    969 )
    970 {
    971   Thread_Control *idle = ( *get_idle_thread )( context );
    972 
     953RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
     954  Scheduler_Node *node,
     955  Thread_Control *idle
     956)
     957{
    973958  _Assert(
    974959    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
     
    984969  _Scheduler_Node_set_user( node, idle );
    985970  node->idle = idle;
    986 
    987   return idle;
    988 }
    989 
    990 /**
    991  * @brief Try to schedule this scheduler node.
     971}
     972
     973/**
     974 * @brief Use an idle thread for this scheduler node.
     975 *
     976 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
     977 * helping state may use an idle thread for the scheduler node owned by itself
     978 * in case it executes currently using another scheduler node or in case it is
     979 * in a blocking state.
    992980 *
    993981 * @param[in] context The scheduler instance context.
    994  * @param[in] node The node which wants to get scheduled.
     982 * @param[in] node The node which wants to use the idle thread.
    995983 * @param[in] get_idle_thread Function to get an idle thread.
    996  *
    997  * @retval true This node can be scheduled.
    998  * @retval false Otherwise.
    999  */
    1000 RTEMS_INLINE_ROUTINE bool _Scheduler_Try_to_schedule_node(
     984 */
     985RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
    1001986  Scheduler_Context         *context,
    1002987  Scheduler_Node            *node,
     
    1004989)
    1005990{
    1006   bool schedule;
     991  Thread_Control *idle = ( *get_idle_thread )( context );
     992
     993  _Scheduler_Set_idle_thread( node, idle );
     994
     995  return idle;
     996}
     997
     998typedef enum {
     999  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
     1000  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
     1001  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
     1002} Scheduler_Try_to_schedule_action;
     1003
     1004/**
     1005 * @brief Try to schedule this scheduler node.
     1006 *
     1007 * @param[in] context The scheduler instance context.
     1008 * @param[in] node The node which wants to get scheduled.
     1009 * @param[in] idle A potential idle thread used by a potential victim node.
     1010 * @param[in] get_idle_thread Function to get an idle thread.
     1011 *
     1012 * @retval true This node can be scheduled.
     1013 * @retval false Otherwise.
     1014 */
     1015RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
     1016_Scheduler_Try_to_schedule_node(
     1017  Scheduler_Context         *context,
     1018  Scheduler_Node            *node,
     1019  Thread_Control            *idle,
     1020  Scheduler_Get_idle_thread  get_idle_thread
     1021)
     1022{
     1023  Scheduler_Try_to_schedule_action action;
    10071024  Thread_Control *owner;
    10081025  Thread_Control *user;
    10091026
     1027  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
     1028
    10101029  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    1011     return true;
     1030    return action;
    10121031  }
    10131032
     
    10191038      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    10201039    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
    1021       _Scheduler_Use_idle_thread( context, node, get_idle_thread );
     1040      if ( idle != NULL ) {
     1041        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
     1042      } else {
     1043        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
     1044      }
    10221045    } else {
    10231046      _Scheduler_Node_set_user( node, owner );
    10241047    }
    1025 
    1026     schedule = true;
    10271048  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
    10281049    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
    10291050      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
     1051    } else if ( idle != NULL ) {
     1052      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
    10301053    } else {
    10311054      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
    10321055    }
    1033 
    1034     schedule = true;
    10351056  } else {
    10361057    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
     
    10381059    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
    10391060      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
    1040       schedule = true;
    10411061    } else {
    1042       schedule = false;
     1062      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
    10431063    }
    10441064  }
    10451065
    1046   return schedule;
     1066  return action;
    10471067}
    10481068
     
    10771097
    10781098  return idle;
     1099}
     1100
     1101RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
     1102  Scheduler_Node *needs_idle,
     1103  Scheduler_Node *uses_idle,
     1104  Thread_Control *idle
     1105)
     1106{
     1107  uses_idle->idle = NULL;
     1108  _Scheduler_Node_set_user(
     1109    uses_idle,
     1110    _Scheduler_Node_get_owner( uses_idle )
     1111  );
     1112  _Scheduler_Set_idle_thread( needs_idle, idle );
    10791113}
    10801114
  • cpukit/score/include/rtems/score/schedulersmpimpl.h

    rb7f3ee9 r441e8ef  
    88
    99/*
    10  * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
     10 * Copyright (c) 2013-2015 embedded brains GmbH.  All rights reserved.
    1111 *
    1212 *  embedded brains GmbH
     
    533533)
    534534{
    535   Thread_Control *user = _Scheduler_Node_get_user( node );
    536   Thread_Control *lowest_scheduled_user =
    537     _Scheduler_Node_get_user( lowest_scheduled );
    538535  Thread_Control *needs_help;
    539   Thread_Control *idle;
    540 
    541   _Scheduler_SMP_Node_change_state(
    542     _Scheduler_SMP_Node_downcast( lowest_scheduled ),
    543     SCHEDULER_SMP_NODE_READY
    544   );
    545   _Scheduler_Thread_change_state(
    546     lowest_scheduled_user,
    547     THREAD_SCHEDULER_READY
    548   );
    549 
    550   _Scheduler_Thread_set_node( user, node );
    551 
    552   _Scheduler_SMP_Allocate_processor(
     536  Scheduler_Try_to_schedule_action action;
     537
     538  action = _Scheduler_Try_to_schedule_node(
    553539    context,
    554540    node,
    555     lowest_scheduled,
    556     allocate_processor
     541    _Scheduler_Node_get_idle( lowest_scheduled ),
     542    _Scheduler_SMP_Get_idle_thread
    557543  );
    558544
    559   ( *insert_scheduled )( context, node );
    560   ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
    561 
    562   idle = _Scheduler_Release_idle_thread(
    563     context,
    564     lowest_scheduled,
    565     _Scheduler_SMP_Release_idle_thread
    566   );
    567   if ( idle == NULL ) {
    568     needs_help = lowest_scheduled_user;
     545  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
     546    Thread_Control *lowest_scheduled_user =
     547      _Scheduler_Node_get_user( lowest_scheduled );
     548    Thread_Control *idle;
     549
     550    _Scheduler_SMP_Node_change_state(
     551      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
     552      SCHEDULER_SMP_NODE_READY
     553    );
     554    _Scheduler_Thread_change_state(
     555      lowest_scheduled_user,
     556      THREAD_SCHEDULER_READY
     557    );
     558
     559    _Scheduler_SMP_Allocate_processor(
     560      context,
     561      node,
     562      lowest_scheduled,
     563      allocate_processor
     564    );
     565
     566    ( *insert_scheduled )( context, node );
     567    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
     568
     569    idle = _Scheduler_Release_idle_thread(
     570      context,
     571      lowest_scheduled,
     572      _Scheduler_SMP_Release_idle_thread
     573    );
     574    if ( idle == NULL ) {
     575      needs_help = lowest_scheduled_user;
     576    } else {
     577      needs_help = NULL;
     578    }
     579  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
     580    _Scheduler_SMP_Node_change_state(
     581      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
     582      SCHEDULER_SMP_NODE_READY
     583    );
     584    _Scheduler_SMP_Node_change_state(
     585      _Scheduler_SMP_Node_downcast( node ),
     586      SCHEDULER_SMP_NODE_SCHEDULED
     587    );
     588
     589    ( *insert_scheduled )( context, node );
     590    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
     591
     592    _Scheduler_Exchange_idle_thread(
     593      node,
     594      lowest_scheduled,
     595      _Scheduler_Node_get_idle( lowest_scheduled )
     596    );
     597
     598    needs_help = NULL;
    569599  } else {
     600    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
     601    _Scheduler_SMP_Node_change_state(
     602      _Scheduler_SMP_Node_downcast( node ),
     603      SCHEDULER_SMP_NODE_BLOCKED
     604    );
    570605    needs_help = NULL;
    571606  }
     
    661696  Thread_Control *needs_help;
    662697
    663   while ( true ) {
     698  do {
    664699    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );
    665700
     
    672707
    673708      needs_help = NULL;
    674 
    675       break;
    676     } else if (
    677       _Scheduler_Try_to_schedule_node(
     709    } else {
     710      Scheduler_Try_to_schedule_action action;
     711
     712      action = _Scheduler_Try_to_schedule_node(
    678713        context,
    679714        highest_ready,
     715        _Scheduler_Node_get_idle( node ),
    680716        _Scheduler_SMP_Get_idle_thread
    681       )
    682     ) {
    683       Thread_Control *user = _Scheduler_Node_get_user( node );
    684       Thread_Control *idle;
    685 
    686       _Scheduler_SMP_Node_change_state(
    687         _Scheduler_SMP_Node_downcast( node ),
    688         SCHEDULER_SMP_NODE_READY
    689717      );
    690       _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
    691 
    692       _Scheduler_SMP_Allocate_processor(
    693         context,
    694         highest_ready,
    695         node,
    696         allocate_processor
    697       );
    698 
    699       ( *insert_ready )( context, node );
    700       ( *move_from_ready_to_scheduled )( context, highest_ready );
    701 
    702       idle = _Scheduler_Release_idle_thread(
    703         context,
    704         node,
    705         _Scheduler_SMP_Release_idle_thread
    706       );
    707       if ( idle == NULL ) {
    708         needs_help = user;
     718
     719      if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
     720        Thread_Control *user = _Scheduler_Node_get_user( node );
     721        Thread_Control *idle;
     722
     723        _Scheduler_SMP_Node_change_state(
     724          _Scheduler_SMP_Node_downcast( node ),
     725          SCHEDULER_SMP_NODE_READY
     726        );
     727        _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
     728
     729        _Scheduler_SMP_Allocate_processor(
     730          context,
     731          highest_ready,
     732          node,
     733          allocate_processor
     734        );
     735
     736        ( *insert_ready )( context, node );
     737        ( *move_from_ready_to_scheduled )( context, highest_ready );
     738
     739        idle = _Scheduler_Release_idle_thread(
     740          context,
     741          node,
     742          _Scheduler_SMP_Release_idle_thread
     743        );
     744        if ( idle == NULL ) {
     745          needs_help = user;
     746        } else {
     747          needs_help = NULL;
     748        }
     749      } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
     750        _Scheduler_SMP_Node_change_state(
     751          _Scheduler_SMP_Node_downcast( node ),
     752          SCHEDULER_SMP_NODE_READY
     753        );
     754        _Scheduler_SMP_Node_change_state(
     755          _Scheduler_SMP_Node_downcast( highest_ready ),
     756          SCHEDULER_SMP_NODE_SCHEDULED
     757        );
     758
     759        ( *insert_ready )( context, node );
     760        ( *move_from_ready_to_scheduled )( context, highest_ready );
     761
     762        _Scheduler_Exchange_idle_thread(
     763          highest_ready,
     764          node,
     765          _Scheduler_Node_get_idle( node )
     766        );
     767
     768        needs_help = NULL;
    709769      } else {
    710         needs_help = NULL;
     770        _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
     771
     772        _Scheduler_SMP_Node_change_state(
     773          _Scheduler_SMP_Node_downcast( highest_ready ),
     774          SCHEDULER_SMP_NODE_BLOCKED
     775        );
     776
     777        ( *extract_from_ready )( context, highest_ready );
     778
     779        continue;
    711780      }
    712 
    713       break;
    714     } else {
    715       _Scheduler_SMP_Node_change_state(
    716         _Scheduler_SMP_Node_downcast( highest_ready ),
    717         SCHEDULER_SMP_NODE_BLOCKED
    718       );
    719 
    720       ( *extract_from_ready )( context, highest_ready );
    721781    }
    722   }
     782  } while ( false );
    723783
    724784  return needs_help;
     
    741801)
    742802{
    743   while ( true ) {
     803  do {
    744804    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
    745 
    746     if (
    747       _Scheduler_Try_to_schedule_node(
    748         context,
    749         highest_ready,
    750         _Scheduler_SMP_Get_idle_thread
    751       )
    752     ) {
     805    Scheduler_Try_to_schedule_action action;
     806
     807    action = _Scheduler_Try_to_schedule_node(
     808      context,
     809      highest_ready,
     810      NULL,
     811      _Scheduler_SMP_Get_idle_thread
     812    );
     813
     814    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    753815      _Scheduler_SMP_Allocate_processor(
    754816        context,
     
    759821
    760822      ( *move_from_ready_to_scheduled )( context, highest_ready );
    761 
    762       break;
    763823    } else {
     824      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
     825
    764826      _Scheduler_SMP_Node_change_state(
    765827        _Scheduler_SMP_Node_downcast( highest_ready ),
     
    768830
    769831      ( *extract_from_ready )( context, highest_ready );
     832
     833      continue;
    770834    }
    771   }
     835  } while ( false );
    772836}
    773837
  • cpukit/score/src/objectidtoname.c

    rb7f3ee9 r441e8ef  
    3232  Objects_Control     *the_object = (Objects_Control *) 0;
    3333  Objects_Locations    ignored_location;
     34  ISR_lock_Context     lock_context;
    3435
    3536  /*
     
    5758  #endif
    5859
    59   the_object = _Objects_Get( information, tmpId, &ignored_location );
     60  the_object = _Objects_Get_isr_disable(
     61    information,
     62    tmpId,
     63    &ignored_location,
     64    &lock_context
     65  );
    6066  if ( !the_object )
    6167    return OBJECTS_INVALID_ID;
    6268
    6369  *name = the_object->name;
    64   _Objects_Put( the_object );
     70  _ISR_lock_ISR_enable( &lock_context );
    6571  return OBJECTS_NAME_OR_ID_LOOKUP_SUCCESSFUL;
    6672}
  • doc/bsp_howto/clock.t

    rb7f3ee9 r441e8ef  
    1919This section describes the global variables expected to be provided by
    2020this driver.
    21 
    22 @subsection Major and Minor Number
    23 
    24 The major and minor numbers of the clock driver are made available via
    25 the following variables. 
    26 
    27 @itemize @bullet
    28 @item rtems_device_major_number rtems_clock_major;
    29 @item rtems_device_minor_number rtems_clock_minor;
    30 @end itemize
    31 
    32 The clock device driver is responsible for declaring and
    33 initializing these variables.  These variables are used
    34 by other RTEMS components -- notably the Shared Memory Driver.
    35 
    36 @b{NOTE:} In a future RTEMS version, these variables may be replaced
    37 with the clock device driver registering @b{/dev/clock}.
    3821
    3922@subsection Ticks Counter
  • testsuites/smptests/smpmrsp01/init.c

    rb7f3ee9 r441e8ef  
    11/*
    2  * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
     2 * Copyright (c) 2014-2015 embedded brains GmbH.  All rights reserved.
    33 *
    44 *  embedded brains GmbH
     
    7575
    7676static test_context test_instance = {
    77   .barrier = SMP_BARRIER_CONTROL_INITIALIZER,
    7877  .switch_lock = SMP_LOCK_INITIALIZER("test instance switch lock")
    7978};
     
    8685    /* Wait */
    8786  }
     87}
     88
     89static void barrier_init(test_context *ctx)
     90{
     91  _SMP_barrier_Control_initialize(&ctx->barrier);
    8892}
    8993
     
    292296  change_prio(RTEMS_SELF, 3);
    293297
     298  barrier_init(ctx);
    294299  reset_switch_events(ctx);
    295300
     
    468473}
    469474
     475static void obtain_after_migration_worker(rtems_task_argument arg)
     476{
     477  test_context *ctx = &test_instance;
     478  rtems_status_code sc;
     479  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
     480
     481  assert_prio(RTEMS_SELF, 3);
     482
     483  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
     484  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     485
     486  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
     487  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     488
     489  /* Worker done (K) */
     490  barrier(ctx, &barrier_state);
     491
     492  while (true) {
     493    /* Wait for termination */
     494  }
     495}
     496
     497static void obtain_after_migration_high(rtems_task_argument arg)
     498{
     499  test_context *ctx = &test_instance;
     500  rtems_status_code sc;
     501  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
     502
     503  assert_prio(RTEMS_SELF, 2);
     504
     505  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
     506  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     507
     508  /* Obtain done (I) */
     509  barrier(ctx, &barrier_state);
     510
     511  /* Ready to release (J) */
     512  barrier(ctx, &barrier_state);
     513
     514  sc = rtems_semaphore_release(ctx->mrsp_ids[1]);
     515  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     516
     517  rtems_task_suspend(RTEMS_SELF);
     518  rtems_test_assert(0);
     519}
     520
     521static void test_mrsp_obtain_after_migration(test_context *ctx)
     522{
     523  rtems_status_code sc;
     524  rtems_task_priority prio;
     525  rtems_id scheduler_id;
     526  SMP_barrier_State barrier_state;
     527
     528  puts("test MrsP obtain after migration");
     529
     530  change_prio(RTEMS_SELF, 3);
     531
     532  barrier_init(ctx);
     533  reset_switch_events(ctx);
     534
     535  /* Create tasks */
     536
     537  sc = rtems_task_create(
     538    rtems_build_name('H', 'I', 'G', '0'),
     539    2,
     540    RTEMS_MINIMUM_STACK_SIZE,
     541    RTEMS_DEFAULT_MODES,
     542    RTEMS_DEFAULT_ATTRIBUTES,
     543    &ctx->high_task_id[0]
     544  );
     545  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     546
     547  sc = rtems_task_create(
     548    rtems_build_name('W', 'O', 'R', 'K'),
     549    3,
     550    RTEMS_MINIMUM_STACK_SIZE,
     551    RTEMS_DEFAULT_MODES,
     552    RTEMS_DEFAULT_ATTRIBUTES,
     553    &ctx->worker_ids[0]
     554  );
     555  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     556
     557  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
     558  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     559
     560  /* Create MrsP semaphore objects */
     561
     562  sc = rtems_semaphore_create(
     563    rtems_build_name('M', 'R', 'S', 'P'),
     564    1,
     565    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
     566      | RTEMS_BINARY_SEMAPHORE,
     567    3,
     568    &ctx->mrsp_ids[0]
     569  );
     570  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     571
     572  sc = rtems_semaphore_create(
     573    rtems_build_name('M', 'R', 'S', 'P'),
     574    1,
     575    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
     576      | RTEMS_BINARY_SEMAPHORE,
     577    2,
     578    &ctx->mrsp_ids[1]
     579  );
     580  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     581
     582  sc = rtems_semaphore_create(
     583    rtems_build_name('M', 'R', 'S', 'P'),
     584    1,
     585    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
     586      | RTEMS_BINARY_SEMAPHORE,
     587    1,
     588    &ctx->mrsp_ids[2]
     589  );
     590  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     591
     592  prio = 4;
     593  sc = rtems_semaphore_set_priority(
     594    ctx->mrsp_ids[2],
     595    ctx->scheduler_ids[1],
     596    prio,
     597    &prio
     598  );
     599  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     600  rtems_test_assert(prio == 1);
     601
     602  /* Check executing task parameters */
     603
     604  sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
     605  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     606
     607  rtems_test_assert(ctx->scheduler_ids[0] == scheduler_id);
     608
     609  assert_prio(RTEMS_SELF, 3);
     610
     611  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
     612  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     613
     614  assert_prio(RTEMS_SELF, 3);
     615
     616  /* Start other tasks */
     617
     618  sc = rtems_task_start(ctx->worker_ids[0], obtain_after_migration_worker, 0);
     619  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     620
     621  sc = rtems_task_start(ctx->high_task_id[0], obtain_after_migration_high, 0);
     622  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     623
     624  rtems_test_assert(rtems_get_current_processor() == 1);
     625
     626  /* Obtain done (I) */
     627  _SMP_barrier_State_initialize(&barrier_state);
     628  barrier(ctx, &barrier_state);
     629
     630  sc = rtems_task_suspend(ctx->high_task_id[0]);
     631  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     632
     633  rtems_test_assert(rtems_get_current_processor() == 1);
     634
     635  /*
     636   * Obtain second MrsP semaphore and ensure that we change the priority of our
     637   * own scheduler node and not the one we are currently using.
     638   */
     639
     640  sc = rtems_semaphore_obtain(ctx->mrsp_ids[2], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
     641  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     642
     643  assert_prio(RTEMS_SELF, 1);
     644
     645  rtems_test_assert(rtems_get_current_processor() == 1);
     646
     647  sc = rtems_semaphore_release(ctx->mrsp_ids[2]);
     648  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     649
     650  sc = rtems_task_resume(ctx->high_task_id[0]);
     651  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     652
     653  /* Ready to release (J) */
     654  barrier(ctx, &barrier_state);
     655
     656  rtems_test_assert(rtems_get_current_processor() == 1);
     657
     658  /* Prepare barrier for worker */
     659  barrier_init(ctx);
     660  _SMP_barrier_State_initialize(&barrier_state);
     661
     662  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
     663  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     664
     665  rtems_test_assert(rtems_get_current_processor() == 0);
     666
     667  print_switch_events(ctx);
     668
     669  /* Worker done (K) */
     670  barrier(ctx, &barrier_state);
     671
     672  sc = rtems_task_delete(ctx->worker_ids[0]);
     673  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     674
     675  sc = rtems_task_delete(ctx->high_task_id[0]);
     676  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     677
     678  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
     679  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     680
     681  sc = rtems_semaphore_delete(ctx->mrsp_ids[1]);
     682  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     683
     684  sc = rtems_semaphore_delete(ctx->mrsp_ids[2]);
     685  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     686}
     687
    470688static void test_mrsp_flush_error(void)
    471689{
     
    10351253  change_prio(RTEMS_SELF, 4);
    10361254
     1255  barrier_init(ctx);
    10371256  reset_switch_events(ctx);
    10381257
     
    16381857  test_mrsp_multiple_obtain();
    16391858  test_mrsp_various_block_and_unblock(ctx);
     1859  test_mrsp_obtain_after_migration(ctx);
    16401860  test_mrsp_obtain_and_sleep_and_release(ctx);
    16411861  test_mrsp_obtain_and_release_with_help(ctx);
  • testsuites/smptests/smpmrsp01/smpmrsp01.scn

    rb7f3ee9 r441e8ef  
    2424[1] MAIN -> HIG1 (prio   2, node HIG1)
    2525[1] HIG1 -> MAIN (prio   3, node WORK)
    26 [0] HIG0 -> IDLE (prio   4, node MAIN)
     26[0] HIG0 -> IDLE (prio 255, node IDLE)
     27[0] IDLE -> MAIN (prio   4, node MAIN)
    2728[1] MAIN -> WORK (prio   3, node WORK)
    28 [0] IDLE -> MAIN (prio   4, node MAIN)
     29test MrsP obtain after migration
     30[1] IDLE -> WORK (prio   3, node WORK)
     31[0] MAIN -> HIG0 (prio   2, node HIG0)
     32[1] WORK -> MAIN (prio   3, node WORK)
     33[0] HIG0 -> IDLE (prio   2, node HIG0)
     34[0] IDLE -> HIG0 (prio   2, node HIG0)
     35[1] MAIN -> WORK (prio   3, node WORK)
     36[0] HIG0 -> MAIN (prio   3, node MAIN)
    2937test MrsP obtain and sleep and release
    3038[0] MAIN ->  RUN (prio   2, node  RUN)
     
    4351[1] MAIN -> HELP (prio   2, node HELP)
    4452[1] HELP -> MAIN (prio   2, node HELP)
    45 [0] IDLE -> MAIN (prio   3, node MAIN)
     53[0] IDLE -> MAIN (prio   1, node MAIN)
    4654[1] MAIN -> HELP (prio   2, node HELP)
    4755test MrsP obtain and release
     
    4957[1] WORK -> MAIN (prio   3, node WORK)
    5058[0] MAIN -> HIG0 (prio   1, node HIG0)
    51 [1] MAIN -> WORK (prio   4, node WORK)
     59[1] MAIN -> WORK (prio   3, node WORK)
    5260[0] HIG0 -> MAIN (prio   2, node MAIN)
    5361test MrsP load
    5462worker[0]
    55   sleep = 53
    56   timeout = 3445
    57   obtain[0] = 7240
    58   obtain[1] = 5484
    59   obtain[2] = 12983
    60   obtain[3] = 9453
    61   obtain[4] = 16142
    62   obtain[5] = 12509
    63   obtain[6] = 16471
    64   obtain[7] = 14380
    65   obtain[8] = 16566
    66   obtain[9] = 16192
    67   obtain[10] = 14868
    68   obtain[11] = 18208
    69   obtain[12] = 12505
    70   obtain[13] = 19995
    71   obtain[14] = 11155
    72   obtain[15] = 20684
    73   obtain[16] = 7288
    74   obtain[17] = 22252
    75   obtain[18] = 6476
    76   obtain[19] = 18299
    77   obtain[20] = 5711
    78   obtain[21] = 17063
    79   obtain[22] = 4791
    80   obtain[23] = 14655
    81   obtain[24] = 3452
    82   obtain[25] = 10565
    83   obtain[26] = 2912
    84   obtain[27] = 8142
    85   obtain[28] = 2090
    86   obtain[29] = 5086
    87   obtain[30] = 1145
    88   obtain[31] = 1946
    89   cpu[0] = 378475
    90   cpu[1] = 64814
    91   cpu[2] = 132133
    92   cpu[3] = 138047
     63  sleep = 7
     64  timeout = 1780
     65  obtain[0] = 607
     66  obtain[1] = 443
     67  obtain[2] = 988
     68  obtain[3] = 659
     69  obtain[4] = 1169
     70  obtain[5] = 846
     71  obtain[6] = 1267
     72  obtain[7] = 854
     73  obtain[8] = 1016
     74  obtain[9] = 1079
     75  obtain[10] = 1165
     76  obtain[11] = 1020
     78  obtain[12] = 767
     78  obtain[13] = 925
     79  obtain[14] = 792
     80  obtain[15] = 881
     81  obtain[16] = 439
     82  obtain[17] = 1007
     83  obtain[18] = 243
     84  obtain[19] = 853
     85  obtain[20] = 210
     86  obtain[21] = 445
     87  obtain[22] = 247
     88  obtain[23] = 497
     89  obtain[24] = 102
     90  obtain[25] = 580
     91  obtain[26] = 90
     92  obtain[27] = 186
     93  obtain[28] = 74
     94  obtain[29] = 139
     95  obtain[30] = 68
     96  obtain[31] = 98
     97  cpu[0] = 27776
     98  cpu[1] = 2795
     99  cpu[2] = 4397
     100  cpu[3] = 4551
    93101worker[1]
    94102  sleep = 1
    95   timeout = 6
    96   obtain[0] = 19
    97   obtain[1] = 8
    98   obtain[2] = 15
    99   obtain[3] = 24
    100   obtain[4] = 20
    101   obtain[5] = 19
    102   obtain[6] = 14
    103   obtain[7] = 40
    104   obtain[8] = 45
    105   obtain[9] = 20
     103  timeout = 0
     104  obtain[0] = 1
     105  obtain[1] = 0
     106  obtain[2] = 3
     107  obtain[3] = 0
     108  obtain[4] = 0
     109  obtain[5] = 0
     110  obtain[6] = 0
     111  obtain[7] = 0
     112  obtain[8] = 0
     113  obtain[9] = 0
    106114  obtain[10] = 0
    107   obtain[11] = 48
    108   obtain[12] = 13
    109   obtain[13] = 57
    110   obtain[14] = 30
    111   obtain[15] = 48
    112   obtain[16] = 36
    113   obtain[17] = 36
    114   obtain[18] = 19
    115   obtain[19] = 20
    116   obtain[20] = 42
    117   obtain[21] = 44
    118   obtain[22] = 23
     115  obtain[11] = 0
     116  obtain[12] = 0
     117  obtain[13] = 0
     118  obtain[14] = 0
     119  obtain[15] = 0
     120  obtain[16] = 0
     121  obtain[17] = 0
     122  obtain[18] = 0
     123  obtain[19] = 0
     124  obtain[20] = 0
     125  obtain[21] = 0
     126  obtain[22] = 0
    119127  obtain[23] = 0
    120128  obtain[24] = 0
    121   obtain[25] = 26
     129  obtain[25] = 0
    122130  obtain[26] = 0
    123131  obtain[27] = 0
     
    126134  obtain[30] = 0
    127135  obtain[31] = 0
    128   cpu[0] = 650
    129   cpu[1] = 92
    130   cpu[2] = 379
    131   cpu[3] = 212
     136  cpu[0] = 9
     137  cpu[1] = 0
     138  cpu[2] = 0
     139  cpu[3] = 0
    132140worker[2]
    133   sleep = 51
    134   timeout = 3731
    135   obtain[0] = 7182
    136   obtain[1] = 5663
    137   obtain[2] = 12945
    138   obtain[3] = 9229
    139   obtain[4] = 15592
    140   obtain[5] = 12125
    141   obtain[6] = 16767
    142   obtain[7] = 14480
    143   obtain[8] = 16620
    144   obtain[9] = 16098
    145   obtain[10] = 16409
    146   obtain[11] = 18109
    147   obtain[12] = 12995
    148   obtain[13] = 19452
    149   obtain[14] = 10719
    150   obtain[15] = 20024
    151   obtain[16] = 7769
    152   obtain[17] = 21913
    153   obtain[18] = 6636
    154   obtain[19] = 18524
    155   obtain[20] = 5952
    156   obtain[21] = 16411
    157   obtain[22] = 5228
    158   obtain[23] = 14456
    159   obtain[24] = 4292
    160   obtain[25] = 11143
    161   obtain[26] = 3019
    162   obtain[27] = 8023
    163   obtain[28] = 2006
    164   obtain[29] = 4664
    165   obtain[30] = 1109
    166   obtain[31] = 1976
    167   cpu[0] = 65356
    168   cpu[1] = 381723
    169   cpu[2] = 133444
    170   cpu[3] = 134588
     141  sleep = 5
     142  timeout = 2083
     143  obtain[0] = 740
     144  obtain[1] = 489
     145  obtain[2] = 1232
     146  obtain[3] = 732
     147  obtain[4] = 1361
     148  obtain[5] = 1070
     149  obtain[6] = 1334
     150  obtain[7] = 997
     151  obtain[8] = 1418
     152  obtain[9] = 1087
     153  obtain[10] = 1005
     154  obtain[11] = 1088
     155  obtain[12] = 865
     156  obtain[13] = 1279
     157  obtain[14] = 698
     158  obtain[15] = 1152
     159  obtain[16] = 339
     160  obtain[17] = 1347
     161  obtain[18] = 340
     162  obtain[19] = 723
     163  obtain[20] = 295
     164  obtain[21] = 933
     165  obtain[22] = 223
     166  obtain[23] = 633
     167  obtain[24] = 236
     168  obtain[25] = 405
     169  obtain[26] = 140
     170  obtain[27] = 261
     171  obtain[28] = 70
     172  obtain[29] = 157
     173  obtain[30] = 89
     174  obtain[31] = 71
     175  cpu[0] = 1931
     176  cpu[1] = 35336
     177  cpu[2] = 4338
     178  cpu[3] = 4018
    171179worker[3]
    172180  sleep = 1
    173   timeout = 11
    174   obtain[0] = 11
    175   obtain[1] = 6
    176   obtain[2] = 33
    177   obtain[3] = 20
    178   obtain[4] = 10
    179   obtain[5] = 10
    180   obtain[6] = 28
    181   obtain[7] = 18
    182   obtain[8] = 27
    183   obtain[9] = 40
    184   obtain[10] = 33
    185   obtain[11] = 36
    186   obtain[12] = 26
     181  timeout = 1
     182  obtain[0] = 0
     183  obtain[1] = 0
     184  obtain[2] = 3
     185  obtain[3] = 0
     186  obtain[4] = 5
     187  obtain[5] = 0
     188  obtain[6] = 0
     189  obtain[7] = 0
     190  obtain[8] = 0
     191  obtain[9] = 0
     192  obtain[10] = 0
     193  obtain[11] = 0
     194  obtain[12] = 0
    187195  obtain[13] = 0
    188   obtain[14] = 15
    189   obtain[15] = 16
     196  obtain[14] = 0
     197  obtain[15] = 0
    190198  obtain[16] = 0
    191   obtain[17] = 18
     199  obtain[17] = 0
    192200  obtain[18] = 0
    193   obtain[19] = 42
     201  obtain[19] = 0
    194202  obtain[20] = 0
    195   obtain[21] = 88
     203  obtain[21] = 0
    196204  obtain[22] = 0
    197   obtain[23] = 24
     205  obtain[23] = 0
    198206  obtain[24] = 0
    199207  obtain[25] = 0
    200208  obtain[26] = 0
    201   obtain[27] = 28
    202   obtain[28] = 0
    203   obtain[29] = 0
    204   obtain[30] = 31
    205   obtain[31] = 0
    206   cpu[0] = 136
    207   cpu[1] = 573
    208   cpu[2] = 291
    209   cpu[3] = 121
    210 worker[4]
    211   sleep = 47
    212   timeout = 3278
    213   obtain[0] = 7397
    214   obtain[1] = 5723
    215   obtain[2] = 13399
    216   obtain[3] = 9018
    217   obtain[4] = 16575
    218   obtain[5] = 12731
    219   obtain[6] = 16571
    220   obtain[7] = 14376
    221   obtain[8] = 16786
    222   obtain[9] = 17022
    223   obtain[10] = 15889
    224   obtain[11] = 19338
    225   obtain[12] = 13240
    226   obtain[13] = 19055
    227   obtain[14] = 11533
    228   obtain[15] = 22667
    229   obtain[16] = 7521
    230   obtain[17] = 21826
    231   obtain[18] = 6320
    232   obtain[19] = 18522
    233   obtain[20] = 6874
    234   obtain[21] = 16498
    235   obtain[22] = 4983
    236   obtain[23] = 14210
    237   obtain[24] = 4019
    238   obtain[25] = 11510
    239   obtain[26] = 3425
    240   obtain[27] = 8809
    241   obtain[28] = 2002
    242   obtain[29] = 5197
    243   obtain[30] = 996
    244   obtain[31] = 2276
    245   cpu[0] = 20729
    246   cpu[1] = 19760
    247   cpu[2] = 343613
    248   cpu[3] = 348561
    249 worker[5]
    250   sleep = 61
    251   timeout = 3183
    252   obtain[0] = 7291
    253   obtain[1] = 5782
    254   obtain[2] = 13633
    255   obtain[3] = 9864
    256   obtain[4] = 16465
    257   obtain[5] = 12581
    258   obtain[6] = 17135
    259   obtain[7] = 14616
    260   obtain[8] = 16524
    261   obtain[9] = 16472
    262   obtain[10] = 15194
    263   obtain[11] = 18038
    264   obtain[12] = 13801
    265   obtain[13] = 19959
    266   obtain[14] = 11693
    267   obtain[15] = 20770
    268   obtain[16] = 7328
    269   obtain[17] = 23222
    270   obtain[18] = 7186
    271   obtain[19] = 19739
    272   obtain[20] = 6584
    273   obtain[21] = 17450
    274   obtain[22] = 5241
    275   obtain[23] = 14808
    276   obtain[24] = 4287
    277   obtain[25] = 11387
    278   obtain[26] = 3367
    279   obtain[27] = 8149
    280   obtain[28] = 1887
    281   obtain[29] = 4969
    282   obtain[30] = 1123
    283   obtain[31] = 1695
    284   cpu[0] = 19504
    285   cpu[1] = 20069
    286   cpu[2] = 346015
    287   cpu[3] = 350953
    288 worker[6]
    289   sleep = 1
    290   timeout = 15
    291   obtain[0] = 26
    292   obtain[1] = 22
    293   obtain[2] = 45
    294   obtain[3] = 32
    295   obtain[4] = 45
    296   obtain[5] = 76
    297   obtain[6] = 49
    298   obtain[7] = 64
    299   obtain[8] = 99
    300   obtain[9] = 70
    301   obtain[10] = 55
    302   obtain[11] = 48
    303   obtain[12] = 39
    304   obtain[13] = 28
    305   obtain[14] = 60
    306   obtain[15] = 48
    307   obtain[16] = 17
    308   obtain[17] = 74
    309   obtain[18] = 38
    310   obtain[19] = 60
    311   obtain[20] = 63
    312   obtain[21] = 66
    313   obtain[22] = 23
    314   obtain[23] = 48
    315   obtain[24] = 0
    316   obtain[25] = 78
    317   obtain[26] = 0
    318   obtain[27] = 43
     209  obtain[27] = 0
    319210  obtain[28] = 0
    320211  obtain[29] = 0
    321212  obtain[30] = 0
    322   obtain[31] = 32
    323   cpu[0] = 71
    324   cpu[1] = 39
    325   cpu[2] = 1333
    326   cpu[3] = 1254
     213  obtain[31] = 0
     214  cpu[0] = 0
     215  cpu[1] = 14
     216  cpu[2] = 0
     217  cpu[3] = 3
     218worker[4]
     219  sleep = 9
     220  timeout = 2196
     221  obtain[0] = 896
     222  obtain[1] = 565
     223  obtain[2] = 1443
     224  obtain[3] = 936
     225  obtain[4] = 1506
     226  obtain[5] = 1028
     227  obtain[6] = 1541
     228  obtain[7] = 1088
     229  obtain[8] = 1683
     230  obtain[9] = 1494
     231  obtain[10] = 1283
     232  obtain[11] = 1075
     233  obtain[12] = 1101
     234  obtain[13] = 1038
     235  obtain[14] = 758
     236  obtain[15] = 1300
     237  obtain[16] = 350
     238  obtain[17] = 1180
     239  obtain[18] = 396
     240  obtain[19] = 1171
     241  obtain[20] = 232
     242  obtain[21] = 767
     243  obtain[22] = 336
     244  obtain[23] = 470
     245  obtain[24] = 196
     246  obtain[25] = 461
     247  obtain[26] = 148
     248  obtain[27] = 394
     249  obtain[28] = 68
     250  obtain[29] = 259
     251  obtain[30] = 80
     252  obtain[31] = 54
     253  cpu[0] = 725
     254  cpu[1] = 1001
     255  cpu[2] = 25845
     256  cpu[3] = 23032
     257worker[5]
     258  sleep = 8
     259  timeout = 2062
     260  obtain[0] = 754
     261  obtain[1] = 540
     262  obtain[2] = 1318
     263  obtain[3] = 886
     264  obtain[4] = 1396
     265  obtain[5] = 1030
     266  obtain[6] = 1556
     267  obtain[7] = 1126
     268  obtain[8] = 1338
     269  obtain[9] = 1061
     270  obtain[10] = 1173
     271  obtain[11] = 1396
     272  obtain[12] = 1130
     273  obtain[13] = 1189
     274  obtain[14] = 867
     275  obtain[15] = 1290
     276  obtain[16] = 339
     277  obtain[17] = 1177
     278  obtain[18] = 396
     279  obtain[19] = 915
     280  obtain[20] = 236
     281  obtain[21] = 1084
     282  obtain[22] = 146
     283  obtain[23] = 699
     284  obtain[24] = 185
     285  obtain[25] = 562
     286  obtain[26] = 120
     287  obtain[27] = 423
     288  obtain[28] = 153
     289  obtain[29] = 347
     290  obtain[30] = 28
     291  obtain[31] = 250
     292  cpu[0] = 911
     293  cpu[1] = 1018
     294  cpu[2] = 23145
     295  cpu[3] = 25154
     296worker[6]
     297  sleep = 1
     298  timeout = 3
     299  obtain[0] = 3
     300  obtain[1] = 0
     301  obtain[2] = 3
     302  obtain[3] = 0
     303  obtain[4] = 0
     304  obtain[5] = 6
     305  obtain[6] = 0
     306  obtain[7] = 8
     307  obtain[8] = 3
     308  obtain[9] = 0
     309  obtain[10] = 0
     310  obtain[11] = 0
     311  obtain[12] = 13
     312  obtain[13] = 0
     313  obtain[14] = 0
     314  obtain[15] = 0
     315  obtain[16] = 2
     316  obtain[17] = 0
     317  obtain[18] = 0
     318  obtain[19] = 0
     319  obtain[20] = 0
     320  obtain[21] = 0
     321  obtain[22] = 0
     322  obtain[23] = 0
     323  obtain[24] = 0
     324  obtain[25] = 0
     325  obtain[26] = 0
     326  obtain[27] = 0
     327  obtain[28] = 0
     328  obtain[29] = 0
     329  obtain[30] = 0
     330  obtain[31] = 0
     331  cpu[0] = 0
     332  cpu[1] = 11
     333  cpu[2] = 42
     334  cpu[3] = 24
    327335worker[7]
    328   sleep = 1
     336  sleep = 0
    329337  timeout = 0
    330338  obtain[0] = 0
     
    362370  cpu[0] = 0
    363371  cpu[1] = 0
    364   cpu[2] = 1
     372  cpu[2] = 0
    365373  cpu[3] = 0
    366 migrations[0] = 437361
    367 migrations[1] = 437363
    368 migrations[2] = 441234
    369 migrations[3] = 433487
     374migrations[0] = 20731
     375migrations[1] = 20731
     376migrations[2] = 20366
     377migrations[3] = 21099
    370378*** END OF TEST SMPMRSP 1 ***
Note: See TracChangeset for help on using the changeset viewer.