source: rtems/testsuites/smptests/smpmulticast01/init.c @ 72960bc7

Last change on this file was 72960bc7, checked in by Sebastian Huber <sebastian.huber@…> on 08/13/20 at 09:33:47

libtest: Change T_step() and T_assert_step()

Normally, the expected test step must be a compile-time constant. Allow
variable expected test steps for T_step() and T_assert_step(). This
can be used for parameterized test loops with individual fixtures.

Remove the ability to use custom failure messages due to some
implementation constraints.

Update #3199.

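For illustration, a minimal sketch of what this change enables; the test case name and loop bound are hypothetical and not part of the test file below:

T_TEST_CASE(ExampleVariableSteps)
{
  unsigned int i;

  T_plan(3);

  for (i = 0; i < 3; ++i) {
    /* The expected step may now be a variable instead of a compile-time constant */
    T_step(i);
  }
}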
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2019 embedded brains GmbH
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rtems/score/smpimpl.h>
#include <rtems/score/atomic.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/sysinit.h>
#include <rtems.h>

#include <string.h>

#include <rtems/test.h>
#include <tmacros.h>

#define CPU_COUNT 32

const char rtems_test_name[] = "SMPMULTICAST 1";

static const T_config config = {
  .name = "SMPMultiCast",
  .putchar = T_putchar_default,
  .verbosity = T_VERBOSE,
  .now = T_now_clock
};

typedef struct {
  rtems_test_parallel_context base;
  Atomic_Uint id[CPU_COUNT][CPU_COUNT];
} test_context;

static test_context test_instance;

static void clear_ids_by_worker(test_context *ctx, size_t worker_index)
{
  memset(&ctx->id[worker_index][0], 0, sizeof(ctx->id[worker_index]));
}

static void unicast_action_irq_disabled(
  uint32_t cpu_index,
  SMP_Action_handler handler,
  void *arg
)
{
  rtems_interrupt_level level;

  rtems_interrupt_local_disable(level);
  _SMP_Unicast_action(cpu_index, handler, arg);
  rtems_interrupt_local_enable(level);
}

static void unicast_action_dispatch_disabled(
  uint32_t cpu_index,
  SMP_Action_handler handler,
  void *arg
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
  _SMP_Unicast_action(cpu_index, handler, arg);
  _Thread_Dispatch_enable(cpu_self);
}

static void multicast_action_irq_disabled(
  const Processor_mask *targets,
  SMP_Action_handler handler,
  void *arg
)
{
  rtems_interrupt_level level;

  rtems_interrupt_local_disable(level);
  _SMP_Multicast_action(targets, handler, arg);
  rtems_interrupt_local_enable(level);
}

static void multicast_action_dispatch_disabled(
  const Processor_mask *targets,
  SMP_Action_handler handler,
  void *arg
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
  _SMP_Multicast_action(targets, handler, arg);
  _Thread_Dispatch_enable(cpu_self);
}

static void broadcast_action_irq_disabled(
  SMP_Action_handler handler,
  void *arg
)
{
  rtems_interrupt_level level;

  rtems_interrupt_local_disable(level);
  _SMP_Broadcast_action(handler, arg);
  rtems_interrupt_local_enable(level);
}

static void broadcast_action_dispatch_disabled(
  SMP_Action_handler handler,
  void *arg
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
  _SMP_Broadcast_action(handler, arg);
  _Thread_Dispatch_enable(cpu_self);
}

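/*
 * SMP action handler used by the tests below: atomically stores the index of
 * the executing processor plus one into its slot of the identifier array, so
 * that the callers can check exactly which processors ran the action.
 */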
static void action(void *arg)
{
  Atomic_Uint *id;
  uint32_t self;
  unsigned expected;
  bool success;

  id = arg;
  self = rtems_scheduler_get_processor();
  expected = 0;
  success = _Atomic_Compare_exchange_uint(
    &id[self],
    &expected,
    self + 1,
    ATOMIC_ORDER_RELAXED,
    ATOMIC_ORDER_RELAXED
  );
  T_quiet_true(success, "set CPU identifier failed");
}

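/*
 * Sends the unicast action to each processor in turn and checks that only the
 * addressed processor recorded its identifier.
 */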
static void test_unicast(
  test_context *ctx,
  void (*unicast_action)(uint32_t, SMP_Action_handler, void *)
)
{
  uint32_t step;
  uint32_t i;
  uint32_t n;

  T_plan(1);
  step = 0;
  n = rtems_scheduler_get_processor_maximum();

  for (i = 0; i < n; ++i) {
    uint32_t j;

    clear_ids_by_worker(ctx, 0);

    (*unicast_action)(i, action, &ctx->id[0][0]);

    for (j = 0; j < n; ++j) {
      unsigned id;

      ++step;
      id = _Atomic_Load_uint(&ctx->id[0][j], ATOMIC_ORDER_RELAXED);

      if (j == i) {
        T_quiet_eq_uint(j + 1, id);
      } else {
        T_quiet_eq_uint(0, id);
      }
    }
  }

  T_step_eq_u32(0, step, n * n);
}

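/*
 * Sends the multicast action with a single-processor mask for each processor
 * in turn and checks that only the selected processor recorded its
 * identifier.
 */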
static void test_multicast(
  test_context *ctx,
  void (*multicast_action)(const Processor_mask *, SMP_Action_handler, void *)
)
{
  uint32_t step;
  uint32_t i;
  uint32_t n;

  T_plan(1);
  step = 0;
  n = rtems_scheduler_get_processor_maximum();

  for (i = 0; i < n; ++i) {
    Processor_mask cpus;
    uint32_t j;

    clear_ids_by_worker(ctx, 0);

    _Processor_mask_Zero(&cpus);
    _Processor_mask_Set(&cpus, i);
    (*multicast_action)(&cpus, action, &ctx->id[0][0]);

    for (j = 0; j < n; ++j) {
      unsigned id;

      ++step;
      id = _Atomic_Load_uint(&ctx->id[0][j], ATOMIC_ORDER_RELAXED);

      if (j == i) {
        T_quiet_eq_uint(j + 1, id);
      } else {
        T_quiet_eq_uint(0, id);
      }
    }
  }

  T_step_eq_u32(0, step, n * n);
}

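/*
 * Sends the broadcast action repeatedly and checks that each of the n
 * processors recorded its identifier.
 */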
static void test_broadcast(
  test_context *ctx,
  void (*broadcast_action)(SMP_Action_handler, void *)
)
{
  uint32_t step;
  uint32_t i;
  uint32_t n;

  T_plan(1);
  step = 0;
  n = rtems_scheduler_get_processor_maximum();

  for (i = 0; i < n; ++i) {
    uint32_t j;

    clear_ids_by_worker(ctx, 0);

    (*broadcast_action)(action, &ctx->id[0][0]);

    for (j = 0; j < n; ++j) {
      unsigned id;

      ++step;
      id = _Atomic_Load_uint(&ctx->id[0][j], ATOMIC_ORDER_RELAXED);
      T_quiet_eq_uint(j + 1, id);
    }
  }

  T_step_eq_u32(0, step, n * n);
}

static rtems_interval test_duration(void)
{
  return rtems_clock_get_ticks_per_second();
}

static rtems_interval test_broadcast_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  return test_duration();
}

static void test_broadcast_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  test_context *ctx;

  ctx = (test_context *) base;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    Per_CPU_Control *cpu_self;

    clear_ids_by_worker(ctx, worker_index);
    cpu_self = _Thread_Dispatch_disable();
    _SMP_Multicast_action(NULL, action, &ctx->id[worker_index][0]);
    _Thread_Dispatch_enable(cpu_self);
  }
}

static void test_broadcast_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  /* Do nothing */
}

static const rtems_test_parallel_job test_jobs[] = {
  {
    .init = test_broadcast_init,
    .body = test_broadcast_body,
    .fini = test_broadcast_fini,
    .cascade = true
  }
};

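/*
 * Runs the broadcast job body concurrently on the parallel test workers for
 * about one second per activation; .cascade = true repeats the job with an
 * increasing number of active workers.
 */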
T_TEST_CASE(ParallelBroadcast)
{
  rtems_test_parallel(
    &test_instance.base,
    NULL,
    &test_jobs[0],
    RTEMS_ARRAY_SIZE(test_jobs)
  );
}

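/*
 * Exercises the unicast, multicast, and broadcast actions from the system
 * initialization context, i.e. before multitasking is started.
 */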
static void test_before_multitasking(void)
{
  test_context *ctx;

  ctx = &test_instance;

  T_case_begin("UnicastBeforeMultitasking", NULL);
  test_unicast(ctx, _SMP_Unicast_action);
  T_case_end();

  T_case_begin("UnicastBeforeMultitaskingIRQDisabled", NULL);
  test_unicast(ctx, unicast_action_irq_disabled);
  T_case_end();

  T_case_begin("UnicastBeforeMultitaskingDispatchDisabled", NULL);
  test_unicast(ctx, unicast_action_dispatch_disabled);
  T_case_end();

  T_case_begin("MulticastBeforeMultitasking", NULL);
  test_multicast(ctx, _SMP_Multicast_action);
  T_case_end();

  T_case_begin("MulticastBeforeMultitaskingIRQDisabled", NULL);
  test_multicast(ctx, multicast_action_irq_disabled);
  T_case_end();

  T_case_begin("MulticastBeforeMultitaskingDispatchDisabled", NULL);
  test_multicast(ctx, multicast_action_dispatch_disabled);
  T_case_end();

  T_case_begin("BroadcastBeforeMultitasking", NULL);
  test_broadcast(ctx, _SMP_Broadcast_action);
  T_case_end();

  T_case_begin("BroadcastBeforeMultitaskingIRQDisabled", NULL);
  test_broadcast(ctx, broadcast_action_irq_disabled);
  T_case_end();

  T_case_begin("BroadcastBeforeMultitaskingDispatchDisabled", NULL);
  test_broadcast(ctx, broadcast_action_dispatch_disabled);
  T_case_end();
}

static void after_drivers(void)
{
  TEST_BEGIN();
  T_run_initialize(&config);
  test_before_multitasking();
}

RTEMS_SYSINIT_ITEM(
  after_drivers,
  RTEMS_SYSINIT_DEVICE_DRIVERS,
  RTEMS_SYSINIT_ORDER_LAST
);

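/*
 * Runs on the target processor: checks that it executes on the expected
 * processor, sets the per-CPU state to an invalid value, and then spins
 * forever.  The expected outcome is the
 * SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS error checked in
 * fatal_extension().
 */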
static void set_wrong_cpu_state(void *arg)
{
  Per_CPU_Control *cpu_self;

  cpu_self = arg;
  T_step_eq_ptr(0, cpu_self, _Per_CPU_Get());
  cpu_self->state = 123;

  while (true) {
    /* Do nothing */
  }
}

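/*
 * Targets another processor with set_wrong_cpu_state() while interrupts and
 * thread dispatching are disabled.  The multicast action is expected to end
 * in a fatal error, so returning from it is treated as a test failure.
 */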
static void test_wrong_cpu_state_to_perform_jobs(void)
{
  Per_CPU_Control *cpu_self;
  rtems_interrupt_level level;
  Processor_mask targets;
  uint32_t cpu_index;

  T_case_begin("WrongCPUStateToPerformJobs", NULL);
  T_plan(4);
  cpu_self = _Thread_Dispatch_disable();

  cpu_index = _Per_CPU_Get_index(cpu_self);
  cpu_index = (cpu_index + 1) % rtems_scheduler_get_processor_maximum();
  _Processor_mask_Zero(&targets);
  _Processor_mask_Set(&targets, cpu_index);

  rtems_interrupt_local_disable(level);

  _SMP_Multicast_action(
    &targets,
    set_wrong_cpu_state,
    _Per_CPU_Get_by_index(cpu_index)
  );

  /* If everything is all right, we don't end up here */
  rtems_interrupt_local_enable(level);
  _Thread_Dispatch_enable(cpu_self);
  rtems_fatal(RTEMS_FATAL_SOURCE_APPLICATION, 0);
}

#define TEST_JOB_ORDER_JOBS 3

static Per_CPU_Job job_order_jobs[TEST_JOB_ORDER_JOBS];

static void job_order_handler_0(void *arg)
{
  T_step(1);
}

static void job_order_handler_1(void *arg)
{
  T_step(2);
}

static void job_order_handler_2(void *arg)
{
  T_step(3);
}

static const Per_CPU_Job_context job_order_contexts[TEST_JOB_ORDER_JOBS] = {
  { .handler = job_order_handler_0 },
  { .handler = job_order_handler_1 },
  { .handler = job_order_handler_2 }
};

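/*
 * Checks that jobs added to a processor are performed in the order in which
 * they were added: the three handlers issue test steps 1 to 3 in sequence.
 */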
T_TEST_CASE(JobOrder)
{
  Per_CPU_Control *cpu_self;
  size_t i;

  T_plan(4);
  cpu_self = _Thread_Dispatch_disable();

  for (i = 0; i < TEST_JOB_ORDER_JOBS; ++i) {
    job_order_jobs[i].context = &job_order_contexts[i];
    _Per_CPU_Add_job(cpu_self, &job_order_jobs[i]);
  }

  T_step(0);
  _SMP_Send_message(_Per_CPU_Get_index(cpu_self), SMP_MESSAGE_PERFORM_JOBS);
  _Thread_Dispatch_enable(cpu_self);
}

#define TEST_ADD_JOB_IN_JOB_JOBS 3

static Per_CPU_Job add_job_in_job_jobs[TEST_ADD_JOB_IN_JOB_JOBS];

static void add_job_in_job_handler_0(void *arg)
{
  T_step(1);
  _Per_CPU_Add_job(_Per_CPU_Get(), &add_job_in_job_jobs[1]);
}

static void add_job_in_job_handler_1(void *arg)
{
  T_step(3);
}

static const Per_CPU_Job_context
add_job_in_job_contexts[TEST_ADD_JOB_IN_JOB_JOBS] = {
  { .handler = add_job_in_job_handler_0 },
  { .handler = add_job_in_job_handler_1 }
};

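/*
 * Checks that a job added from within a job handler is not performed in the
 * same round: handler 0 adds the second job, which runs only after the second
 * SMP_MESSAGE_PERFORM_JOBS message, yielding the step order 0, 1, 2, 3.
 */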
T_TEST_CASE(AddJobInJob)
{
  Per_CPU_Control *cpu_self;
  size_t i;

  T_plan(4);
  cpu_self = _Thread_Dispatch_disable();

  for (i = 0; i < TEST_ADD_JOB_IN_JOB_JOBS; ++i) {
    add_job_in_job_jobs[i].context = &add_job_in_job_contexts[i];
  }

  _Per_CPU_Add_job(cpu_self, &add_job_in_job_jobs[0]);
  T_step(0);
  _SMP_Send_message(_Per_CPU_Get_index(cpu_self), SMP_MESSAGE_PERFORM_JOBS);
  T_step(2);
  _SMP_Send_message(_Per_CPU_Get_index(cpu_self), SMP_MESSAGE_PERFORM_JOBS);
  _Thread_Dispatch_enable(cpu_self);
}

T_TEST_CASE(UnicastDuringMultitaskingIRQDisabled)
{
  test_unicast(&test_instance, unicast_action_irq_disabled);
}

T_TEST_CASE(UnicastDuringMultitaskingDispatchDisabled)
{
  test_unicast(&test_instance, unicast_action_dispatch_disabled);
}

T_TEST_CASE(MulticastDuringMultitaskingIRQDisabled)
{
  test_multicast(&test_instance, multicast_action_irq_disabled);
}

T_TEST_CASE(MulticastDuringMultitaskingDispatchDisabled)
{
  test_multicast(&test_instance, multicast_action_dispatch_disabled);
}

T_TEST_CASE(BroadcastDuringMultitaskingIRQDisabled)
{
  test_broadcast(&test_instance, broadcast_action_irq_disabled);
}

T_TEST_CASE(BroadcastDuringMultitaskingDispatchDisabled)
{
  test_broadcast(&test_instance, broadcast_action_dispatch_disabled);
}

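/*
 * Runs all registered test cases and then provokes the wrong CPU state fatal
 * error; on a single-processor configuration the test is finished via an
 * application fatal error instead.
 */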
static void Init(rtems_task_argument arg)
{
  T_register();
  T_run_all();

  if (rtems_scheduler_get_processor_maximum() > 1) {
    test_wrong_cpu_state_to_perform_jobs();
  } else {
    rtems_fatal(RTEMS_FATAL_SOURCE_APPLICATION, 0);
  }
}

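/*
 * Fatal error handler: completes the WrongCPUStateToPerformJobs test case for
 * the expected SMP fatal error and finalizes the test run; the application
 * fatal error raised in Init() also finalizes the test run.
 */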
static void fatal_extension(
  rtems_fatal_source source,
  bool always_set_to_false,
  rtems_fatal_code code
)
{
  bool ok;

  if (source == RTEMS_FATAL_SOURCE_SMP) {
    T_step_eq_int(1, source, RTEMS_FATAL_SOURCE_SMP);
    T_step_false(2, always_set_to_false, "unexpected argument value");
    T_step_eq_int(3, code, SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS);
    T_case_end();

    ok = T_run_finalize();
    rtems_test_assert(ok);
    TEST_END();
  } else if (source == RTEMS_FATAL_SOURCE_APPLICATION) {
    ok = T_run_finalize();
    rtems_test_assert(ok);
    TEST_END();
  }
}

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER

#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT

#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_INITIAL_EXTENSIONS \
  { .fatal = fatal_extension }, \
  RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>