source: rtems/cpukit/libtest/testparallel.c @ 6fe01e4b

5
Last change on this file since 6fe01e4b was 6fe01e4b, checked in by Sebastian Huber <sebastian.huber@…>, on 01/14/19 at 08:08:18

build: Move test support to librtemstest.a

One reason to move the test support into a dedicated library is the
standard output wrap_*() functions. They may conflict with
application-level wrappers.

Update #3199.

  • Property mode set to 100644
File size: 4.8 KB
Line 
1/*
2 * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15
16#ifdef HAVE_CONFIG_H
17  #include "config.h"
18#endif
19
20#include <rtems/test.h>
21#include <rtems/score/assert.h>
22#include <rtems.h>
23
24static void stop_worker_timer(rtems_id timer_id, void *arg)
25{
26  rtems_test_parallel_context *ctx = arg;
27
28  _Atomic_Store_ulong(&ctx->stop, 1, ATOMIC_ORDER_RELAXED);
29}
30
31static void start_worker_stop_timer(
32  rtems_test_parallel_context *ctx,
33  rtems_interval duration
34)
35{
36  rtems_status_code sc;
37
38  _Atomic_Store_ulong(&ctx->stop, 0, ATOMIC_ORDER_RELEASE);
39
40  sc = rtems_timer_fire_after(
41    ctx->stop_worker_timer_id,
42    duration,
43    stop_worker_timer,
44    ctx
45  );
46  _Assert(sc == RTEMS_SUCCESSFUL);
47  (void) sc;
48}
49
50static void run_tests(
51  rtems_test_parallel_context *ctx,
52  const rtems_test_parallel_job *jobs,
53  size_t job_count
54)
55{
56  SMP_barrier_State bs = SMP_BARRIER_STATE_INITIALIZER;
57  size_t i;
58
59  _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
60
61  for (i = 0; i < job_count; ++i) {
62    const rtems_test_parallel_job *job = &jobs[i];
63    size_t n = rtems_get_processor_count();
64    size_t j = job->cascade ? 0 : rtems_get_processor_count() - 1;
65
66    while (j < n) {
67      size_t active_worker = j + 1;
68      size_t worker_index;
69      rtems_interrupt_level level;
70
71      /*
72       * Determine worker index via the current processor index to get
73       * consistent job runs with respect to the cache topology.
74       */
75      rtems_interrupt_local_disable(level);
76      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
77      worker_index = rtems_get_current_processor();
78      rtems_interrupt_local_enable(level);
79
80      _Assert(worker_index < ctx->worker_count);
81
82      if (rtems_test_parallel_is_master_worker(worker_index)) {
83        rtems_interval duration = (*job->init)(ctx, job->arg, active_worker);
84
85        if (duration > 0) {
86          start_worker_stop_timer(ctx, duration);
87        }
88      }
89
90      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
91
92      if (worker_index <= j) {
93        (*job->body)(ctx, job->arg, active_worker, worker_index);
94      }
95
96      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
97
98      if (rtems_test_parallel_is_master_worker(worker_index)) {
99        (*job->fini)(ctx, job->arg, active_worker);
100      }
101
102      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
103
104      ++j;
105    }
106  }
107}
108
109static void worker_task(rtems_task_argument arg)
110{
111  rtems_test_parallel_context *ctx = (rtems_test_parallel_context *) arg;
112  rtems_status_code sc;
113
114  (void) sc;
115
116  run_tests(ctx, ctx->jobs, ctx->job_count);
117
118  while (true) {
119    /* Wait for delete by master worker */
120  }
121}
122
/*
 * Runs the given parallel job set with one worker per processor.  The
 * calling task becomes worker 0 (the master worker); one additional worker
 * task is created and started per remaining processor.  Any setup failure
 * is treated as fatal (rtems_fatal_error_occurred()).  On return all
 * created workers and the stop timer have been deleted.
 */
void rtems_test_parallel(
  rtems_test_parallel_context *ctx,
  rtems_test_parallel_worker_setup worker_setup,
  const rtems_test_parallel_job *jobs,
  size_t job_count
)
{
  rtems_status_code sc;
  size_t worker_index;
  rtems_task_priority worker_priority;

  _Atomic_Init_ulong(&ctx->stop, 0);
  _SMP_barrier_Control_initialize(&ctx->barrier);
  ctx->worker_count = rtems_get_processor_count();
  ctx->worker_ids[0] = rtems_task_self();
  ctx->jobs = jobs;
  ctx->job_count = job_count;

  /* The fixed-size worker_ids array must hold one id per processor */
  if (RTEMS_ARRAY_SIZE(ctx->worker_ids) < ctx->worker_count) {
    rtems_fatal_error_occurred(0xdeadbeef);
  }

  /*
   * Query the current priority without changing it
   * (RTEMS_CURRENT_PRIORITY); the workers are created with the same
   * priority as the master worker.
   */
  sc = rtems_task_set_priority(
    RTEMS_SELF,
    RTEMS_CURRENT_PRIORITY,
    &worker_priority
  );
  if (sc != RTEMS_SUCCESSFUL) {
    rtems_fatal_error_occurred(0xdeadbeef);
  }

  sc = rtems_timer_create(
    rtems_build_name('S', 'T', 'O', 'P'),
    &ctx->stop_worker_timer_id
  );
  if (sc != RTEMS_SUCCESSFUL) {
    rtems_fatal_error_occurred(0xdeadbeef);
  }

  /* Index 0 is the master worker (this task), so start at 1 */
  for (worker_index = 1; worker_index < ctx->worker_count; ++worker_index) {
    rtems_id worker_id;

    sc = rtems_task_create(
      rtems_build_name('W', 'O', 'R', 'K'),
      worker_priority,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &worker_id
    );
    if (sc != RTEMS_SUCCESSFUL) {
      rtems_fatal_error_occurred(0xdeadbeef);
    }

    ctx->worker_ids[worker_index] = worker_id;

    /* Let the caller configure the worker before it starts executing */
    if (worker_setup != NULL) {
      (*worker_setup)(ctx, worker_index, worker_id);
    }

    sc = rtems_task_start(worker_id, worker_task, (rtems_task_argument) ctx);
    _Assert(sc == RTEMS_SUCCESSFUL);
  }

  /* The master worker takes part in the job runs itself */
  run_tests(ctx, jobs, job_count);

  /* All jobs are done once run_tests() returns; tear down the workers */
  for (worker_index = 1; worker_index < ctx->worker_count; ++worker_index) {
    sc = rtems_task_delete(ctx->worker_ids[worker_index]);
    _Assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_timer_delete(ctx->stop_worker_timer_id);
  _Assert(sc == RTEMS_SUCCESSFUL);
}
Note: See TracBrowser for help on using the repository browser.