source: rtems/cpukit/libmisc/testsupport/testparallel.c @ 8c7eb00

Last change on this file was 8c7eb00, checked in by Sebastian Huber <sebastian.huber@…>, on 03/17/15 at 09:32:20

testsupport: Add worker setup handler

Add rtems_test_parallel_get_task_id().
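The new hook is easiest to see in a short usage sketch (hypothetical test code, not part of this file; test_worker_setup and the affinity policy are illustrative assumptions, and an SMP build providing rtems_task_set_affinity() is assumed). rtems_test_parallel() invokes worker_setup once for each created worker task, after rtems_task_create() and before rtems_task_start(), and the new rtems_test_parallel_get_task_id() accessor returns the task identifier stored for a given worker index.

#include <rtems.h>
#include <rtems/test.h>

/* Hypothetical worker setup handler: pin each worker task to its own
 * processor before the task is started.  It would be passed as the
 * worker_setup argument of rtems_test_parallel(). */
static void test_worker_setup(
  rtems_test_parallel_context *ctx,
  size_t worker_index,
  rtems_id worker_id
)
{
  cpu_set_t cpu_set;

  (void) ctx;

  CPU_ZERO(&cpu_set);
  CPU_SET((int) worker_index, &cpu_set);
  (void) rtems_task_set_affinity(worker_id, sizeof(cpu_set), &cpu_set);

  /* A job handler could later retrieve the same identifier with
   * rtems_test_parallel_get_task_id(ctx, worker_index). */
}

Running the setup before rtems_task_start() means a worker never executes in a half-configured state.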

/*
 * Copyright (c) 2013-2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */


#ifdef HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/test.h>
#include <rtems/score/assert.h>
#include <rtems.h>

/* Timer service routine: signal the workers to stop the current job body. */
static void stop_worker_timer(rtems_id timer_id, void *arg)
{
  rtems_test_parallel_context *ctx = arg;

  _Atomic_Store_ulong(&ctx->stop, 1, ATOMIC_ORDER_RELAXED);
}

/* Clear the stop flag and arm a one-shot timer that sets it again once the
 * job duration returned by the init handler has elapsed. */
static void start_worker_stop_timer(
  rtems_test_parallel_context *ctx,
  rtems_interval duration
)
{
  rtems_status_code sc;

  _Atomic_Store_ulong(&ctx->stop, 0, ATOMIC_ORDER_RELEASE);

  sc = rtems_timer_fire_after(
    ctx->stop_worker_timer_id,
    duration,
    stop_worker_timer,
    ctx
  );
  _Assert(sc == RTEMS_SUCCESSFUL);
  (void) sc;
}

static void run_tests(
  rtems_test_parallel_context *ctx,
  const rtems_test_parallel_job *jobs,
  size_t job_count,
  size_t worker_index
)
{
  SMP_barrier_State bs = SMP_BARRIER_STATE_INITIALIZER;
  size_t i;

  for (i = 0; i < job_count; ++i) {
    const rtems_test_parallel_job *job = &jobs[i];
    size_t n = rtems_get_processor_count();

    /* A cascading job runs once per worker count (1, 2, ..., n active
     * workers); a non-cascading job runs exactly once with all workers. */
    size_t j = job->cascade ? 0 : rtems_get_processor_count() - 1;

    while (j < n) {
      size_t active_worker = j + 1;

      /* The master worker initializes the job and arms the stop timer
       * with the duration returned by the init handler. */
      if (rtems_test_parallel_is_master_worker(worker_index)) {
        rtems_interval duration = (*job->init)(ctx, job->arg, active_worker);

        start_worker_stop_timer(ctx, duration);
      }

      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);

      /* Only the currently active workers execute the job body. */
      if (worker_index <= j) {
        (*job->body)(ctx, job->arg, active_worker, worker_index);
      }

      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);

      if (rtems_test_parallel_is_master_worker(worker_index)) {
        (*job->fini)(ctx, job->arg, active_worker);
      }

      ++j;
    }
  }
}

/* Arguments handed to each worker task via its task argument. */
typedef struct {
  rtems_test_parallel_context *ctx;
  const rtems_test_parallel_job *jobs;
  size_t job_count;
  size_t worker_index;
} worker_arg;

static void worker_task(rtems_task_argument arg)
{
  /* Copy the argument to the local stack: it lives on the master's stack
   * and goes out of scope once the transient event below is sent. */
  worker_arg warg = *(worker_arg *) arg;
  rtems_status_code sc;

  sc = rtems_event_transient_send(warg.ctx->worker_ids[0]);
  _Assert(sc == RTEMS_SUCCESSFUL);
  (void) sc;

  run_tests(warg.ctx, warg.jobs, warg.job_count, warg.worker_index);

  /* Suspend until the master worker deletes this task. */
  rtems_task_suspend(RTEMS_SELF);
}

void rtems_test_parallel(
  rtems_test_parallel_context *ctx,
  rtems_test_parallel_worker_setup worker_setup,
  const rtems_test_parallel_job *jobs,
  size_t job_count
)
{
  rtems_status_code sc;
  size_t worker_index;
  rtems_task_priority worker_priority;

  _Atomic_Init_ulong(&ctx->stop, 0);
  _SMP_barrier_Control_initialize(&ctx->barrier);
  ctx->worker_count = rtems_get_processor_count();

  /* The calling task acts as worker 0, the master worker. */
  ctx->worker_ids[0] = rtems_task_self();

  if (RTEMS_ARRAY_SIZE(ctx->worker_ids) < ctx->worker_count) {
    rtems_fatal_error_occurred(0xdeadbeef);
  }

  /* The remaining workers are created with the calling task's priority. */
  sc = rtems_task_set_priority(
    RTEMS_SELF,
    RTEMS_CURRENT_PRIORITY,
    &worker_priority
  );
  if (sc != RTEMS_SUCCESSFUL) {
    rtems_fatal_error_occurred(0xdeadbeef);
  }

  sc = rtems_timer_create(
    rtems_build_name('S', 'T', 'O', 'P'),
    &ctx->stop_worker_timer_id
  );
  if (sc != RTEMS_SUCCESSFUL) {
    rtems_fatal_error_occurred(0xdeadbeef);
  }

  for (worker_index = 1; worker_index < ctx->worker_count; ++worker_index) {
    worker_arg warg = {
      .ctx = ctx,
      .jobs = jobs,
      .job_count = job_count,
      .worker_index = worker_index
    };
    rtems_id worker_id;

    sc = rtems_task_create(
      rtems_build_name('W', 'O', 'R', 'K'),
      worker_priority,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &worker_id
    );
    if (sc != RTEMS_SUCCESSFUL) {
      rtems_fatal_error_occurred(0xdeadbeef);
    }

    ctx->worker_ids[worker_index] = worker_id;

    /* Let the test customize the worker before it starts. */
    if (worker_setup != NULL) {
      (*worker_setup)(ctx, worker_index, worker_id);
    }

    sc = rtems_task_start(worker_id, worker_task, (rtems_task_argument) &warg);
    _Assert(sc == RTEMS_SUCCESSFUL);

    /* Wait until the worker has copied warg, which goes out of scope at
     * the end of this loop iteration. */
    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    _Assert(sc == RTEMS_SUCCESSFUL);
  }

  /* The master worker participates in the jobs itself. */
  run_tests(ctx, jobs, job_count, 0);

  for (worker_index = 1; worker_index < ctx->worker_count; ++worker_index) {
    sc = rtems_task_delete(ctx->worker_ids[worker_index]);
    _Assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_timer_delete(ctx->stop_worker_timer_id);
  _Assert(sc == RTEMS_SUCCESSFUL);
}
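
For context, a caller drives this machinery roughly as follows (a hypothetical fragment under the same assumptions as the sketch above; test_init, test_body, test_fini, and the ten-tick duration are illustrative, and rtems_test_parallel_stop_job() from <rtems/test.h> is assumed to poll the stop flag set by the timer):

static rtems_test_parallel_context ctx;

static rtems_interval test_init(
  rtems_test_parallel_context *ctx,
  void *arg,
  size_t active_workers
)
{
  /* Prepare the job and return its duration in clock ticks. */
  return 10;
}

static void test_body(
  rtems_test_parallel_context *ctx,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  /* Busy-loop until the master's stop timer fires. */
  while (!rtems_test_parallel_stop_job(ctx)) {
    /* work under test */
  }
}

static void test_fini(
  rtems_test_parallel_context *ctx,
  void *arg,
  size_t active_workers
)
{
  /* Report results for this worker count. */
}

static const rtems_test_parallel_job jobs[] = {
  { .init = test_init, .body = test_body, .fini = test_fini,
    .cascade = true }
};

static void Init(rtems_task_argument arg)
{
  /* Run the cascading job once per worker count: 1, 2, ..., n. */
  rtems_test_parallel(&ctx, test_worker_setup, &jobs[0], RTEMS_ARRAY_SIZE(jobs));
}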