Changeset 86d3a2e in rtems


Ignore:
Timestamp:
Nov 3, 2016, 6:33:11 AM (4 years ago)
Author:
Sebastian Huber <sebastian.huber@…>
Branches:
5, master
Children:
d505fbb
Parents:
63e2ca1b
git-author:
Sebastian Huber <sebastian.huber@…> (11/03/16 06:33:11)
git-committer:
Sebastian Huber <sebastian.huber@…> (11/03/16 09:02:39)
Message:

testsupport: Determine worker index via processor

Determine worker index via the current processor index to get consistent
job runs with respect to the cache topology.

Location:
cpukit/libmisc/testsupport
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • cpukit/libmisc/testsupport/test.h

    r63e2ca1b r86d3a2e  
    1    1   /*
    2        -  * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
         2  +  * Copyright (c) 2014, 2016 embedded brains GmbH.  All rights reserved.
    3    3    *
    4    4    *  embedded brains GmbH
     
   89   89  int rtems_test_printf(const char* format, ...) RTEMS_PRINTFLIKE(1, 2);
   90   90
        91  +#define RTEMS_TEST_PARALLEL_PROCESSOR_MAX 32
        92  +
        93  +typedef struct rtems_test_parallel_job rtems_test_parallel_job;
        94  +
   91   95  /**
   92   96   * @brief Internal context for parallel job execution.
     
   96  100    SMP_barrier_Control barrier;
   97  101    size_t worker_count;
   98       -  rtems_id worker_ids[32];
       102  +  rtems_id worker_ids[RTEMS_TEST_PARALLEL_PROCESSOR_MAX];
   99  103    rtems_id stop_worker_timer_id;
       104  +  const struct rtems_test_parallel_job *jobs;
       105  +  size_t job_count;
  100  106  } rtems_test_parallel_context;
  101  107
     
  119  125   * @brief Basic parallel job description.
  120  126   */
  121       -typedef struct {
       127  +struct rtems_test_parallel_job {
  122  128    /**
  123  129     * @brief Job initialization handler.
     
  187  193     */
  188  194    bool cascade;
  189       -} rtems_test_parallel_job;
       195  +};
  190  196
  191  197  /**
  • cpukit/libmisc/testsupport/testparallel.c

    r63e2ca1b r86d3a2e  
    1    1  /*
    2       - * Copyright (c) 2013-2015 embedded brains GmbH.  All rights reserved.
         2  + * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
    3    3   *
    4    4   *  embedded brains GmbH
     
   51   51    rtems_test_parallel_context *ctx,
   52   52    const rtems_test_parallel_job *jobs,
   53       -  size_t job_count,
   54       -  size_t worker_index
        53  +  size_t job_count
   55   54  )
   56   55  {
   57   56    SMP_barrier_State bs = SMP_BARRIER_STATE_INITIALIZER;
   58   57    size_t i;
        58  +
        59  +  _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
   59   60
   60   61    for (i = 0; i < job_count; ++i) {
     
   65   66      while (j < n) {
   66   67        size_t active_worker = j + 1;
        68  +      size_t worker_index;
        69  +      rtems_interrupt_level level;
        70  +
        71  +      /*
        72  +       * Determine worker index via the current processor index to get
        73  +       * consistent job runs with respect to the cache topology.
        74  +       */
        75  +      rtems_interrupt_local_disable(level);
        76  +      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
        77  +      worker_index = rtems_get_current_processor();
        78  +      rtems_interrupt_local_enable(level);
        79  +
        80  +      _Assert(worker_index < ctx->worker_count);
   67   81
   68   82        if (rtems_test_parallel_is_master_worker(worker_index)) {
     
   86  100        }
   87  101
       102  +      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
       103  +
   88  104        ++j;
   89  105      }
     
   91  107  }
   92  108
   93       -typedef struct {
   94       -  rtems_test_parallel_context *ctx;
   95       -  const rtems_test_parallel_job *jobs;
   96       -  size_t job_count;
   97       -  size_t worker_index;
   98       -} worker_arg;
   99       -
  100  109  static void worker_task(rtems_task_argument arg)
  101  110  {
  102       -  worker_arg warg = *(worker_arg *) arg;
       111  +  rtems_test_parallel_context *ctx = (rtems_test_parallel_context *) arg;
  103  112    rtems_status_code sc;
  104  113
  105       -  sc = rtems_event_transient_send(warg.ctx->worker_ids[0]);
  106       -  _Assert(sc == RTEMS_SUCCESSFUL);
  107  114    (void) sc;
  108  115
  109       -  run_tests(warg.ctx, warg.jobs, warg.job_count, warg.worker_index);
       116  +  run_tests(ctx, ctx->jobs, ctx->job_count);
  110  117
  111  118    while (true) {
  112  119      /* Wait for delete by master worker */
  113  120    }
  114       -}
  115       -
  116       -static char digit(size_t i, size_t pos)
  117       -{
  118       -  return '0' + (i / pos) % 10;
  119  121  }
  120  122
     
  134  136    ctx->worker_count = rtems_get_processor_count();
  135  137    ctx->worker_ids[0] = rtems_task_self();
       138  +  ctx->jobs = jobs;
       139  +  ctx->job_count = job_count;
  136  140
  137  141    if (RTEMS_ARRAY_SIZE(ctx->worker_ids) < ctx->worker_count) {
     
  157  161
  158  162    for (worker_index = 1; worker_index < ctx->worker_count; ++worker_index) {
  159       -    worker_arg warg = {
  160       -      .ctx = ctx,
  161       -      .jobs = jobs,
  162       -      .job_count = job_count,
  163       -      .worker_index = worker_index
  164       -    };
  165  163      rtems_id worker_id;
  166  164
  167  165      sc = rtems_task_create(
  168       -      rtems_build_name(
  169       -        'W',
  170       -        digit(worker_index, 100),
  171       -        digit(worker_index, 10),
  172       -        digit(worker_index, 1)
  173       -      ),
       166  +      rtems_build_name('W', 'O', 'R', 'K'),
  174  167        worker_priority,
  175  168        RTEMS_MINIMUM_STACK_SIZE,
     
  188  181      }
  189  182
  190       -    sc = rtems_task_start(worker_id, worker_task, (rtems_task_argument) &warg);
  191       -    _Assert(sc == RTEMS_SUCCESSFUL);
  192       -
  193       -    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
       183  +    sc = rtems_task_start(worker_id, worker_task, (rtems_task_argument) ctx);
  194  184      _Assert(sc == RTEMS_SUCCESSFUL);
  195  185    }
  196  186
  197       -  run_tests(ctx, jobs, job_count, 0);
       187  +  run_tests(ctx, jobs, job_count);
  198  188
  199  189    for (worker_index = 1; worker_index < ctx->worker_count; ++worker_index) {
Note: See TracChangeset for help on using the changeset viewer.