source: rtems/testsuites/tmtests/tmfine01/init.c @ 8c7eb00

Last change on this file since 8c7eb00 was 8c7eb00, checked in by Sebastian Huber <sebastian.huber@…>, on 03/17/15 at 09:32:20

testsupport: Add worker setup handler

Add rtems_test_parallel_get_task_id().

/*
 * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
  #include "config.h"
#endif

#include "tmacros.h"

#include <stdio.h>
#include <inttypes.h>

#include <rtems/test.h>

const char rtems_test_name[] = "TMFINE 1";

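/* Upper bound on the number of processors (and thus workers) exercised */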
#define CPU_COUNT 32

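/* Capacity of each test message queue */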
#define MSG_COUNT 3

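/* Message payload used by the message queue jobs */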
typedef struct {
  uint32_t value;
} test_msg;

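/*
 * Test context: the synchronization objects and one operation counter per
 * (active worker count, worker index) pair for each job.
 */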
typedef struct {
  rtems_test_parallel_context base;
  rtems_id master;
  rtems_id sema[CPU_COUNT];
  rtems_id mq[CPU_COUNT];
  uint32_t self_event_ops[CPU_COUNT][CPU_COUNT];
  uint32_t all_to_one_event_ops[CPU_COUNT][CPU_COUNT];
  uint32_t one_mutex_ops[CPU_COUNT][CPU_COUNT];
  uint32_t many_mutex_ops[CPU_COUNT][CPU_COUNT];
  uint32_t self_msg_ops[CPU_COUNT][CPU_COUNT];
  uint32_t many_to_one_msg_ops[CPU_COUNT][CPU_COUNT];
} test_context;

static test_context test_instance;

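/*
 * Job duration in clock ticks (one second); test_init() reports it to the
 * parallel test framework for every job.
 */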
static rtems_interval test_duration(void)
{
  return rtems_clock_get_ticks_per_second();
}

static rtems_interval test_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  return test_duration();
}

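/* Prints the per-worker operation counters of one job as simple XML */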
static void test_fini(
  const char *name,
  uint32_t *counters,
  size_t active_workers
)
{
  size_t i;

  printf("  <%s activeWorker=\"%zu\">\n", name, active_workers);

  for (i = 0; i < active_workers; ++i) {
    printf(
      "    <Counter worker=\"%zu\">%" PRIu32 "</Counter>\n",
      i,
      counters[i]
    );
  }

  printf("  </%s>\n", name);
}

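/*
 * SelfEvent: each worker sends an event to itself and receives it again,
 * counting the completed round trips.
 */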
static void test_self_event_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  test_context *ctx = (test_context *) base;
  rtems_id id = rtems_task_self();
  uint32_t counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    rtems_status_code sc;
    rtems_event_set out;

    ++counter;

    sc = rtems_event_send(id, RTEMS_EVENT_0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_receive(
      RTEMS_EVENT_0,
      RTEMS_WAIT | RTEMS_EVENT_ANY,
      RTEMS_NO_TIMEOUT,
      &out
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  ctx->self_event_ops[active_workers - 1][worker_index] = counter;
}

static void test_self_event_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  test_context *ctx = (test_context *) base;

  test_fini(
    "SelfEvent",
    &ctx->self_event_ops[active_workers - 1][0],
    active_workers
  );
}

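/*
 * AllToOneEvent: every worker sends events to its own task, but only the
 * master worker also receives its pending events.
 */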
static void test_all_to_one_event_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  test_context *ctx = (test_context *) base;
  rtems_id id = rtems_task_self();
  bool is_master = rtems_test_parallel_is_master_worker(worker_index);
  uint32_t counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    rtems_status_code sc;

    ++counter;

    sc = rtems_event_send(id, RTEMS_EVENT_0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    if (is_master) {
      rtems_event_set out;

      sc = rtems_event_receive(
        RTEMS_ALL_EVENTS,
        RTEMS_WAIT | RTEMS_EVENT_ANY,
        RTEMS_NO_TIMEOUT,
        &out
      );
      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    }
  }

  ctx->all_to_one_event_ops[active_workers - 1][worker_index] = counter;
}

static void test_all_to_one_event_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  test_context *ctx = (test_context *) base;

  test_fini(
    "AllToOneEvent",
    &ctx->all_to_one_event_ops[active_workers - 1][0],
    active_workers
  );
}

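/*
 * OneMutex: all workers obtain and release the same mutex (sema[0]), so the
 * counters show the cost of lock contention.
 */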
static void test_one_mutex_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  test_context *ctx = (test_context *) base;
  rtems_id id = ctx->sema[0];
  uint32_t counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    rtems_status_code sc;

    ++counter;

    sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_semaphore_release(id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  ctx->one_mutex_ops[active_workers - 1][worker_index] = counter;
}

static void test_one_mutex_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  test_context *ctx = (test_context *) base;

  test_fini(
    "OneMutex",
    &ctx->one_mutex_ops[active_workers - 1][0],
    active_workers
  );
}

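/*
 * ManyMutex: each worker obtains and releases its own mutex, so there is no
 * contention between workers.
 */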
static void test_many_mutex_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  test_context *ctx = (test_context *) base;
  rtems_id id = ctx->sema[worker_index];
  uint32_t counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    rtems_status_code sc;

    ++counter;

    sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_semaphore_release(id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  ctx->many_mutex_ops[active_workers - 1][worker_index] = counter;
}

static void test_many_mutex_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  test_context *ctx = (test_context *) base;

  test_fini(
    "ManyMutex",
    &ctx->many_mutex_ops[active_workers - 1][0],
    active_workers
  );
}

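/*
 * SelfMsg: each worker sends a message to its own queue and receives it
 * again; a full queue (RTEMS_TOO_MANY) is tolerated on the send side.
 */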
static void test_self_msg_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  test_context *ctx = (test_context *) base;
  rtems_id id = ctx->mq[worker_index];
  uint32_t counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    rtems_status_code sc;
    test_msg msg = { .value = 0 };
    size_t n;

    ++counter;

    sc = rtems_message_queue_send(id, &msg, sizeof(msg));
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_TOO_MANY);

    n = sizeof(msg);
    sc = rtems_message_queue_receive(
      id,
      &msg,
      &n,
      RTEMS_WAIT,
      RTEMS_NO_TIMEOUT
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(n == sizeof(msg));
  }

  ctx->self_msg_ops[active_workers - 1][worker_index] = counter;
}

static void test_self_msg_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  test_context *ctx = (test_context *) base;

  test_fini(
    "SelfMsg",
    &ctx->self_msg_ops[active_workers - 1][0],
    active_workers
  );
}

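/*
 * ManyToOneMsg: all workers send to the same queue (mq[0]); only the master
 * worker receives, so senders may see a full queue (RTEMS_TOO_MANY).
 */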
static void test_many_to_one_msg_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  test_context *ctx = (test_context *) base;
  rtems_id id = ctx->mq[0];
  bool is_master = rtems_test_parallel_is_master_worker(worker_index);
  uint32_t counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    rtems_status_code sc;
    test_msg msg = { .value = 0 };
    size_t n;

    ++counter;

    sc = rtems_message_queue_send(id, &msg, sizeof(msg));
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_TOO_MANY);

    if (is_master) {
      n = sizeof(msg);
      sc = rtems_message_queue_receive(
        id,
        &msg,
        &n,
        RTEMS_WAIT,
        RTEMS_NO_TIMEOUT
      );
      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
      rtems_test_assert(n == sizeof(msg));
    }
  }

  ctx->many_to_one_msg_ops[active_workers - 1][worker_index] = counter;
}

static void test_many_to_one_msg_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  test_context *ctx = (test_context *) base;

  test_fini(
    "ManyToOneMsg",
    &ctx->many_to_one_msg_ops[active_workers - 1][0],
    active_workers
  );
}

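/*
 * Job table for rtems_test_parallel().  Since cascade is set, each job is
 * presumably repeated for increasing numbers of active workers, which is why
 * the counters above are indexed by active_workers - 1.
 */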
static const rtems_test_parallel_job test_jobs[] = {
  {
    .init = test_init,
    .body = test_self_event_body,
    .fini = test_self_event_fini,
    .cascade = true
  }, {
    .init = test_init,
    .body = test_all_to_one_event_body,
    .fini = test_all_to_one_event_fini,
    .cascade = true
  }, {
    .init = test_init,
    .body = test_one_mutex_body,
    .fini = test_one_mutex_fini,
    .cascade = true
  }, {
    .init = test_init,
    .body = test_many_mutex_body,
    .fini = test_many_mutex_fini,
    .cascade = true
  }, {
    .init = test_init,
    .body = test_self_msg_body,
    .fini = test_self_msg_fini,
    .cascade = true
  }, {
    .init = test_init,
    .body = test_many_to_one_msg_body,
    .fini = test_many_to_one_msg_fini,
    .cascade = true
  }
};

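/*
 * Creates one priority inheritance mutex and one message queue per potential
 * worker, then runs all jobs via rtems_test_parallel() and wraps the report
 * in a <TestTimeFine01> element.
 */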
static void Init(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  const char *test = "TestTimeFine01";
  size_t i;

  TEST_BEGIN();

  ctx->master = rtems_task_self();

  for (i = 0; i < CPU_COUNT; ++i) {
    rtems_status_code sc;

    sc = rtems_semaphore_create(
      rtems_build_name('T', 'E', 'S', 'T'),
      1,
      RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY | RTEMS_PRIORITY,
      0,
      &ctx->sema[i]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_message_queue_create(
      rtems_build_name('T', 'E', 'S', 'T'),
      MSG_COUNT,
      sizeof(test_msg),
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->mq[i]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  printf("<%s>\n", test);

  rtems_test_parallel(
    &ctx->base,
    NULL,
    &test_jobs[0],
    RTEMS_ARRAY_SIZE(test_jobs)
  );

  printf("</%s>\n", test);

  TEST_END();
  rtems_test_exit(0);
}

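/*
 * Application configuration: SMP with up to CPU_COUNT processors and one
 * task, semaphore, and message queue per processor.
 */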
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER

#define CONFIGURE_USE_IMFS_AS_BASE_FILESYSTEM

#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT

#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_MAXIMUM_SEMAPHORES CPU_COUNT

#define CONFIGURE_MAXIMUM_MESSAGE_QUEUES CPU_COUNT

#define CONFIGURE_MESSAGE_BUFFER_MEMORY \
  CONFIGURE_MESSAGE_BUFFERS_FOR_QUEUE(MSG_COUNT, sizeof(test_msg))

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_SMP_APPLICATION

#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_INIT

#include <rtems/confdefs.h>