source: rtems/testsuites/smptests/smpatomic01/init.c @ a9cc6a84

5
Last change on this file since a9cc6a84 was a9cc6a84, checked in by Sebastian Huber <sebastian.huber@…>, on 06/01/16 at 07:50:44

smptests/smpatomic01: New test cases

Demonstrate that a read-modify-write atomic operation may be necessary
on some architectures to observe the latest value written.

  • Property mode set to 100644
File size: 14.2 KB
Line 
1/*
2 * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * Copyright (c) 2013 Deng Hengyi.
11 *
12 * The license and distribution terms for this file may be
13 * found in the file LICENSE in this distribution or at
14 * http://www.rtems.org/license/LICENSE.
15 */
16
17#ifdef HAVE_CONFIG_H
18  #include "config.h"
19#endif
20
21#include <rtems/score/atomic.h>
22#include <rtems/score/smpbarrier.h>
23#include <rtems.h>
24#include <rtems/bsd.h>
25#include <rtems/test.h>
26#include <limits.h>
27#include <string.h>
28
29#include "tmacros.h"
30
const char rtems_test_name[] = "SMPATOMIC 1";

/* Clock tick length; must agree with CONFIGURE_MICROSECONDS_PER_TICK below */
#define MS_PER_TICK 10

/* The master (Init) task runs above the workers so it can control them */
#define MASTER_PRIORITY 1

#define WORKER_PRIORITY 2

/* Upper bound for processors; per-CPU arrays below are sized with this */
#define CPU_COUNT 32
40
/*
 * Shared state for all test jobs.  The base member must be first so that a
 * rtems_test_parallel_context pointer can be cast to this type.
 */
typedef struct {
  rtems_test_parallel_context base;
  /* Shared atomic counter exercised by the add/sub/or-and/CAS test cases */
  Atomic_Ulong atomic_value;
  /* Per-worker operation counts, summed and checked in test_fini() */
  unsigned long per_worker_value[CPU_COUNT];
  /* Plain counter protected by global_flag or the CAS lock */
  unsigned long normal_value;
  /* Padding so second_value likely lands on a different cache line */
  char unused_space_for_cache_line_separation[128];
  /* Second plain counter used by the fence test case */
  unsigned long second_value;
  /* Test-and-set lock for the flag test case */
  Atomic_Flag global_flag;
  SMP_barrier_Control barrier;
  SMP_barrier_State barrier_state[CPU_COUNT];
  /* Trigger and result times for the store/load and RMW timing test case */
  sbintime_t load_trigger_time;
  sbintime_t load_change_time[CPU_COUNT];
  int load_count[CPU_COUNT];
  sbintime_t rmw_trigger_time;
  sbintime_t rmw_change_time[CPU_COUNT];
  int rmw_count[CPU_COUNT];
} smpatomic01_context;

static smpatomic01_context test_instance;
60
61static rtems_interval test_duration(void)
62{
63  return rtems_clock_get_ticks_per_second();
64}
65
66static void test_fini(
67  smpatomic01_context *ctx,
68  const char *test,
69  bool atomic
70)
71{
72  unsigned long expected_value = 0;
73  unsigned long actual_value;
74  size_t worker_index;
75
76  printf("=== atomic %s test case ===\n", test);
77
78  for (
79    worker_index = 0;
80    worker_index < ctx->base.worker_count;
81    ++worker_index
82  ) {
83    unsigned long worker_value = ctx->per_worker_value[worker_index];
84
85    expected_value += worker_value;
86
87    printf(
88      "worker %zu value: %lu\n",
89      worker_index,
90      worker_value
91    );
92  }
93
94  if (atomic) {
95    actual_value = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
96  } else {
97    actual_value = ctx->normal_value;
98  }
99
100  printf(
101    "atomic value: expected = %lu, actual = %lu\n",
102    expected_value,
103    actual_value
104  );
105
106  rtems_test_assert(expected_value == actual_value);
107}
108
109
110static rtems_interval test_atomic_add_init(
111  rtems_test_parallel_context *base,
112  void *arg,
113  size_t active_workers
114)
115{
116  smpatomic01_context *ctx = (smpatomic01_context *) base;
117
118  _Atomic_Init_ulong(&ctx->atomic_value, 0);
119
120  return test_duration();
121}
122
123static void test_atomic_add_body(
124  rtems_test_parallel_context *base,
125  void *arg,
126  size_t active_workers,
127  size_t worker_index
128)
129{
130  smpatomic01_context *ctx = (smpatomic01_context *) base;
131  unsigned long counter = 0;
132
133  while (!rtems_test_parallel_stop_job(&ctx->base)) {
134    ++counter;
135    _Atomic_Fetch_add_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
136  }
137
138  ctx->per_worker_value[worker_index] = counter;
139}
140
141static void test_atomic_add_fini(
142  rtems_test_parallel_context *base,
143  void *arg,
144  size_t active_workers
145)
146{
147  smpatomic01_context *ctx = (smpatomic01_context *) base;
148
149  test_fini(ctx, "add", true);
150}
151
152static rtems_interval test_atomic_flag_init(
153  rtems_test_parallel_context *base,
154  void *arg,
155  size_t active_workers
156)
157{
158  smpatomic01_context *ctx = (smpatomic01_context *) base;
159
160  _Atomic_Flag_clear(&ctx->global_flag, ATOMIC_ORDER_RELEASE);
161  ctx->normal_value = 0;
162
163  return test_duration();
164}
165
166static void test_atomic_flag_body(
167  rtems_test_parallel_context *base,
168  void *arg,
169  size_t active_workers,
170  size_t worker_index
171)
172{
173  smpatomic01_context *ctx = (smpatomic01_context *) base;
174  unsigned long counter = 0;
175
176  while (!rtems_test_parallel_stop_job(&ctx->base)) {
177    while (_Atomic_Flag_test_and_set(&ctx->global_flag, ATOMIC_ORDER_ACQUIRE)) {
178      /* Wait */
179    }
180
181    ++counter;
182    ++ctx->normal_value;
183
184    _Atomic_Flag_clear(&ctx->global_flag, ATOMIC_ORDER_RELEASE);
185  }
186
187  ctx->per_worker_value[worker_index] = counter;
188}
189
190static void test_atomic_flag_fini(
191  rtems_test_parallel_context *base,
192  void *arg,
193  size_t active_workers
194  )
195{
196  smpatomic01_context *ctx = (smpatomic01_context *) base;
197
198  test_fini(ctx, "flag", false);
199}
200
201static rtems_interval test_atomic_sub_init(
202  rtems_test_parallel_context *base,
203  void *arg,
204  size_t active_workers
205)
206{
207  smpatomic01_context *ctx = (smpatomic01_context *) base;
208
209  _Atomic_Init_ulong(&ctx->atomic_value, 0);
210
211  return test_duration();
212}
213
214static void test_atomic_sub_body(
215  rtems_test_parallel_context *base,
216  void *arg,
217  size_t active_workers,
218  size_t worker_index
219)
220{
221  smpatomic01_context *ctx = (smpatomic01_context *) base;
222  unsigned long counter = 0;
223
224  while (!rtems_test_parallel_stop_job(&ctx->base)) {
225    --counter;
226    _Atomic_Fetch_sub_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
227  }
228
229  ctx->per_worker_value[worker_index] = counter;
230}
231
232static void test_atomic_sub_fini(
233  rtems_test_parallel_context *base,
234  void *arg,
235  size_t active_workers
236)
237{
238  smpatomic01_context *ctx = (smpatomic01_context *) base;
239
240  test_fini(ctx, "sub", true);
241}
242
243static rtems_interval test_atomic_compare_exchange_init(
244  rtems_test_parallel_context *base,
245  void *arg,
246  size_t active_workers
247)
248{
249  smpatomic01_context *ctx = (smpatomic01_context *) base;
250
251  _Atomic_Init_ulong(&ctx->atomic_value, 0);
252  ctx->normal_value = 0;
253
254  return test_duration();
255}
256
257static void test_atomic_compare_exchange_body(
258  rtems_test_parallel_context *base,
259  void *arg,
260  size_t active_workers,
261  size_t worker_index
262)
263{
264  smpatomic01_context *ctx = (smpatomic01_context *) base;
265  unsigned long counter = 0;
266
267  while (!rtems_test_parallel_stop_job(&ctx->base)) {
268    bool success;
269
270    do {
271      unsigned long zero = 0;
272
273      success = _Atomic_Compare_exchange_ulong(
274        &ctx->atomic_value,
275        &zero,
276        1,
277        ATOMIC_ORDER_ACQUIRE,
278        ATOMIC_ORDER_RELAXED
279      );
280    } while (!success);
281
282    ++counter;
283    ++ctx->normal_value;
284
285    _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);
286  }
287
288  ctx->per_worker_value[worker_index] = counter;
289}
290
291static void test_atomic_compare_exchange_fini(
292  rtems_test_parallel_context *base,
293  void *arg,
294  size_t active_workers
295)
296{
297  smpatomic01_context *ctx = (smpatomic01_context *) base;
298
299  test_fini(ctx, "compare exchange", false);
300}
301
302static rtems_interval test_atomic_or_and_init(
303  rtems_test_parallel_context *base,
304  void *arg,
305  size_t active_workers
306)
307{
308  smpatomic01_context *ctx = (smpatomic01_context *) base;
309
310  _Atomic_Init_ulong(&ctx->atomic_value, 0);
311
312  return test_duration();
313}
314
315static void test_atomic_or_and_body(
316  rtems_test_parallel_context *base,
317  void *arg,
318  size_t active_workers,
319  size_t worker_index
320)
321{
322  smpatomic01_context *ctx = (smpatomic01_context *) base;
323  unsigned long the_bit = 1UL << worker_index;
324  unsigned long current_bit = 0;
325
326  while (!rtems_test_parallel_stop_job(&ctx->base)) {
327    unsigned long previous;
328
329    if (current_bit != 0) {
330      previous = _Atomic_Fetch_and_ulong(
331        &ctx->atomic_value,
332        ~the_bit,
333        ATOMIC_ORDER_RELAXED
334      );
335      current_bit = 0;
336    } else {
337      previous = _Atomic_Fetch_or_ulong(
338        &ctx->atomic_value,
339        the_bit,
340        ATOMIC_ORDER_RELAXED
341      );
342      current_bit = the_bit;
343    }
344
345    rtems_test_assert((previous & the_bit) != current_bit);
346  }
347
348  ctx->per_worker_value[worker_index] = current_bit;
349}
350
351static void test_atomic_or_and_fini(
352  rtems_test_parallel_context *base,
353  void *arg,
354  size_t active_workers
355)
356{
357  smpatomic01_context *ctx = (smpatomic01_context *) base;
358
359  test_fini(ctx, "or/and", true);
360}
361
362static rtems_interval test_atomic_fence_init(
363  rtems_test_parallel_context *base,
364  void *arg,
365  size_t active_workers
366)
367{
368  smpatomic01_context *ctx = (smpatomic01_context *) base;
369
370  ctx->normal_value = 0;
371  ctx->second_value = 0;
372  _Atomic_Fence(ATOMIC_ORDER_RELEASE);
373
374  return test_duration();
375}
376
/*
 * Fence test case.  The master worker writes normal_value and then, after a
 * release fence, second_value.  The other workers read in the opposite order
 * with an acquire fence between the loads, so normal_value must always be at
 * least as new as second_value.  The assert uses unsigned wrap-around: if
 * s > n, n - s becomes a huge value >= LONG_MAX and the check fails.
 */
static void test_atomic_fence_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  if (rtems_test_parallel_is_master_worker(worker_index)) {
    /* Writer */
    unsigned long counter = 0;

    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      ++counter;
      ctx->normal_value = counter;
      /* Order the two plain stores: second_value must not become visible
         before normal_value */
      _Atomic_Fence(ATOMIC_ORDER_RELEASE);
      ctx->second_value = counter;
    }
  } else {
    /* Readers */
    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long n;
      unsigned long s;

      s = ctx->second_value;
      /* Order the two plain loads against the writer's release fence */
      _Atomic_Fence(ATOMIC_ORDER_ACQUIRE);
      n = ctx->normal_value;

      rtems_test_assert(n - s < LONG_MAX);
    }
  }
}
408
409static void test_atomic_fence_fini(
410  rtems_test_parallel_context *base,
411  void *arg,
412  size_t active_workers
413)
414{
415  smpatomic01_context *ctx = (smpatomic01_context *) base;
416
417  printf(
418    "=== atomic fence test case ===\n"
419    "normal value = %lu, second value = %lu\n",
420    ctx->normal_value,
421    ctx->second_value
422  );
423}
424
425static rtems_interval test_atomic_store_load_rmw_init(
426  rtems_test_parallel_context *base,
427  void *arg,
428  size_t active_workers
429)
430{
431  smpatomic01_context *ctx = (smpatomic01_context *) base;
432  size_t i;
433
434  _Atomic_Init_ulong(&ctx->atomic_value, 0);
435
436  _SMP_barrier_Control_initialize(&ctx->barrier);
437
438  for (i = 0; i < active_workers; ++i) {
439    _SMP_barrier_State_initialize(&ctx->barrier_state[i]);
440  }
441
442  return 0;
443}
444
445static sbintime_t now(void)
446{
447  struct bintime bt;
448
449  rtems_bsd_binuptime(&bt);
450  return bttosbt(bt);
451}
452
/*
 * Measure how quickly non-zero values stored by processor 0 become visible
 * to the other processors, once via load acquire and once via a
 * read-modify-write operation.  The master worker publishes two trigger
 * times; all workers synchronize on a barrier and then spin until their
 * trigger time, so the store and the polling start nearly simultaneously.
 */
static void test_atomic_store_load_rmw_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  uint32_t cpu_self_index;
  sbintime_t t;
  int counter;

  if (rtems_test_parallel_is_master_worker(worker_index)) {
    rtems_status_code sc;

    /* Start at a tick boundary so the trigger times fall inside ticks */
    sc = rtems_task_wake_after(1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    t = now();
    t += (MS_PER_TICK / 2) * SBT_1MS;
    ctx->load_trigger_time = t;
    t += MS_PER_TICK * SBT_1MS;
    ctx->rmw_trigger_time = t;
  }

  /* Publish the trigger times before the barrier releases the workers */
  _Atomic_Fence(ATOMIC_ORDER_SEQ_CST);

  _SMP_barrier_Wait(
    &ctx->barrier,
    &ctx->barrier_state[worker_index],
    active_workers
  );

  /*
   * Use the physical processor index, to observe timing differences introduced
   * by the system topology.
   */
  cpu_self_index = rtems_get_current_processor();

  /* Store release and load acquire test case */

  counter = 0;
  t = ctx->load_trigger_time;

  while (now() < t) {
    /* Wait */
  }

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELEASE);
  } else {
    /* Count the polls until the store of processor 0 becomes visible */
    while (_Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_ACQUIRE) == 0) {
      ++counter;
    }
  }

  ctx->load_change_time[cpu_self_index] = now();
  ctx->load_count[cpu_self_index] = counter;

  /* Read-modify-write test case */

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELAXED);
  }

  counter = 0;
  t = ctx->rmw_trigger_time;

  while (now() < t) {
    /* Wait */
  }

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
  } else {
    /* Poll with fetch-or on bit 1; an RMW operation may be necessary on
       some architectures to observe the latest value (bit 0) */
    while (
      (_Atomic_Fetch_or_ulong(&ctx->atomic_value, 2, ATOMIC_ORDER_RELAXED) & 1)
        == 0
    ) {
      ++counter;
    }
  }

  ctx->rmw_change_time[cpu_self_index] = now();
  ctx->rmw_count[cpu_self_index] = counter;
}
539
540static void test_atomic_store_load_rmw_fini(
541  rtems_test_parallel_context *base,
542  void *arg,
543  size_t active_workers
544)
545{
546  smpatomic01_context *ctx = (smpatomic01_context *) base;
547  size_t i;
548  struct bintime bt;
549  struct timespec ts;
550
551  printf("=== atomic store release and load acquire test case ===\n");
552
553  for (i = 0; i < active_workers; ++i) {
554    bt = sbttobt(ctx->load_change_time[i] - ctx->load_trigger_time);
555    bintime2timespec(&bt, &ts);
556    printf(
557      "processor %zu delta %lins, load count %i\n",
558      i,
559      ts.tv_nsec,
560      ctx->load_count[i]
561    );
562  }
563
564  printf("=== atomic read-modify-write test case ===\n");
565
566  for (i = 0; i < active_workers; ++i) {
567    bt = sbttobt(ctx->rmw_change_time[i] - ctx->rmw_trigger_time);
568    bintime2timespec(&bt, &ts);
569    printf(
570      "processor %zu delta %lins, read-modify-write count %i\n",
571      i,
572      ts.tv_nsec,
573      ctx->rmw_count[i]
574    );
575  }
576}
577
/* Test jobs executed in order by rtems_test_parallel() in Init() */
static const rtems_test_parallel_job test_jobs[] = {
  {
    .init = test_atomic_add_init,
    .body = test_atomic_add_body,
    .fini = test_atomic_add_fini
  }, {
    .init = test_atomic_flag_init,
    .body = test_atomic_flag_body,
    .fini = test_atomic_flag_fini
  }, {
    .init = test_atomic_sub_init,
    .body = test_atomic_sub_body,
    .fini = test_atomic_sub_fini
  }, {
    .init = test_atomic_compare_exchange_init,
    .body = test_atomic_compare_exchange_body,
    .fini = test_atomic_compare_exchange_fini
  }, {
    .init = test_atomic_or_and_init,
    .body = test_atomic_or_and_body,
    .fini = test_atomic_or_and_fini
  }, {
    .init = test_atomic_fence_init,
    .body = test_atomic_fence_body,
    .fini = test_atomic_fence_fini
  }, {
    .init = test_atomic_store_load_rmw_init,
    .body = test_atomic_store_load_rmw_body,
    .fini = test_atomic_store_load_rmw_fini
  }
};
609
610static void setup_worker(
611  rtems_test_parallel_context *base,
612  size_t worker_index,
613  rtems_id worker_id
614)
615{
616  rtems_status_code sc;
617  rtems_task_priority prio;
618
619  sc = rtems_task_set_priority(worker_id, WORKER_PRIORITY, &prio);
620  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
621}
622
623static void Init(rtems_task_argument arg)
624{
625  smpatomic01_context *ctx = &test_instance;
626
627  TEST_BEGIN();
628
629  rtems_test_parallel(
630    &ctx->base,
631    setup_worker,
632    &test_jobs[0],
633    RTEMS_ARRAY_SIZE(test_jobs)
634  );
635
636  TEST_END();
637  rtems_test_exit(0);
638}
639
/* Application configuration for <rtems/confdefs.h> */

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER

/* Keep in sync with MS_PER_TICK used for the trigger time calculations */
#define CONFIGURE_MICROSECONDS_PER_TICK (MS_PER_TICK * 1000)

#define CONFIGURE_SMP_APPLICATION

/* One worker task per processor, up to CPU_COUNT processors */
#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT

/* The parallel test framework uses a timer to stop the jobs */
#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_INIT_TASK_PRIORITY MASTER_PRIORITY
#define CONFIGURE_INIT_TASK_INITIAL_MODES RTEMS_DEFAULT_MODES
#define CONFIGURE_INIT_TASK_ATTRIBUTES RTEMS_DEFAULT_ATTRIBUTES

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.