source: rtems/testsuites/smptests/smpmulticast01/init.c @ c63e8bb

Last change on this file since c63e8bb was c63e8bb, checked in by Sebastian Huber <sebastian.huber@…>, on Apr 19, 2019 at 9:02:02 AM

score: Modify _Per_CPU_Perform_jobs()

Process only the jobs initially registered on the processing list. This
makes it possible to add jobs for the current processor in a job
handler. These jobs are processed with the next
SMP_MESSAGE_PERFORM_JOBS message. The lock is only acquired and
released once.

  • Property mode set to 100644
File size: 11.7 KB
Line 
1/*
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (C) 2019 embedded brains GmbH
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#include <rtems/score/smpimpl.h>
29#include <rtems/score/atomic.h>
30#include <rtems/score/threaddispatch.h>
31#include <rtems/sysinit.h>
32#include <rtems.h>
33
34#include <string.h>
35
36#include <t.h>
37#include <tmacros.h>
38
39#define CPU_COUNT 32
40
const char rtems_test_name[] = "SMPMULTICAST 1";

/* Configuration for the T test framework runner */
static const T_config config = {
  .name = "SMPMultiCast",
  .putchar = T_putchar_default,
  .verbosity = T_VERBOSE,
  .now = T_now
};

typedef struct {
  rtems_test_parallel_context base;
  /* One row of per-processor identifier slots for each parallel worker */
  Atomic_Uint id[CPU_COUNT][CPU_COUNT];
} test_context;

static test_context test_instance;
56
57static void clear_ids_by_worker(test_context *ctx, size_t worker_index)
58{
59  memset(&ctx->id[worker_index][0], 0, sizeof(ctx->id[worker_index]));
60}
61
62static void multicast_action_irq_disabled(
63  const Processor_mask *targets,
64  SMP_Action_handler handler,
65  void *arg
66)
67{
68  rtems_interrupt_level level;
69
70  rtems_interrupt_local_disable(level);
71  _SMP_Multicast_action(targets, handler, arg);
72  rtems_interrupt_local_enable(level);
73}
74
75static void multicast_action_dispatch_disabled(
76  const Processor_mask *targets,
77  SMP_Action_handler handler,
78  void *arg
79)
80{
81  Per_CPU_Control *cpu_self;
82
83  cpu_self = _Thread_Dispatch_disable();
84  _SMP_Multicast_action(targets, handler, arg);
85  _Thread_Dispatch_enable(cpu_self);
86}
87
88static void broadcast_action_irq_disabled(
89  SMP_Action_handler handler,
90  void *arg
91)
92{
93  rtems_interrupt_level level;
94
95  rtems_interrupt_local_disable(level);
96  _SMP_Broadcast_action(handler, arg);
97  rtems_interrupt_local_enable(level);
98}
99
100static void broadcast_action_dispatch_disabled(
101  SMP_Action_handler handler,
102  void *arg
103)
104{
105  Per_CPU_Control *cpu_self;
106
107  cpu_self = _Thread_Dispatch_disable();
108  _SMP_Broadcast_action(handler, arg);
109  _Thread_Dispatch_enable(cpu_self);
110}
111
112static void action(void *arg)
113{
114  Atomic_Uint *id;
115  uint32_t self;
116  unsigned expected;
117  bool success;
118
119  id = arg;
120  self = rtems_scheduler_get_processor();
121  expected = 0;
122  success = _Atomic_Compare_exchange_uint(
123    &id[self],
124    &expected,
125    self + 1,
126    ATOMIC_ORDER_RELAXED,
127    ATOMIC_ORDER_RELAXED
128  );
129  T_quiet_true(success, "set CPU identifier failed");
130}
131
132static void test_unicast(
133  test_context *ctx,
134  void (*multicast_action)(const Processor_mask *, SMP_Action_handler, void *)
135)
136{
137  uint32_t step;
138  uint32_t i;
139  uint32_t n;
140
141  T_plan(1);
142  step = 0;
143  n = rtems_scheduler_get_processor_maximum();
144
145  for (i = 0; i < n; ++i) {
146    Processor_mask cpus;
147    uint32_t j;
148
149    clear_ids_by_worker(ctx, 0);
150
151    _Processor_mask_Zero(&cpus);
152    _Processor_mask_Set(&cpus, i);
153    (*multicast_action)(&cpus, action, &ctx->id[0][0]);
154
155    for (j = 0; j < n; ++j) {
156      unsigned id;
157
158      ++step;
159      id = _Atomic_Load_uint(&ctx->id[0][j], ATOMIC_ORDER_RELAXED);
160
161      if (j == i) {
162        T_quiet_eq_uint(j + 1, id);
163      } else {
164        T_quiet_eq_uint(0, id);
165      }
166    }
167  }
168
169  T_step_eq_u32(0, step, n * n);
170}
171
172static void test_broadcast(
173  test_context *ctx,
174  void (*broadcast_action)(SMP_Action_handler, void *)
175)
176{
177  uint32_t step;
178  uint32_t i;
179  uint32_t n;
180
181  T_plan(1);
182  step = 0;
183  n = rtems_scheduler_get_processor_maximum();
184
185  for (i = 0; i < n; ++i) {
186    uint32_t j;
187
188    clear_ids_by_worker(ctx, 0);
189
190    (*broadcast_action)(action, &ctx->id[0][0]);
191
192    for (j = 0; j < n; ++j) {
193      unsigned id;
194
195      ++step;
196      id = _Atomic_Load_uint(&ctx->id[0][j], ATOMIC_ORDER_RELAXED);
197      T_quiet_eq_uint(j + 1, id);
198    }
199  }
200
201  T_step_eq_u32(0, step, n * n);
202}
203
/* Duration of the parallel stress job: one second worth of clock ticks. */
static rtems_interval test_duration(void)
{
  return rtems_clock_get_ticks_per_second();
}
208
/* Parallel job init: return the job duration; no per-run setup is needed. */
static rtems_interval test_broadcast_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  return test_duration();
}
217
218static void test_broadcast_body(
219  rtems_test_parallel_context *base,
220  void *arg,
221  size_t active_workers,
222  size_t worker_index
223)
224{
225  test_context *ctx;
226
227  ctx = (test_context *) base;
228
229  while (!rtems_test_parallel_stop_job(&ctx->base)) {
230    Per_CPU_Control *cpu_self;
231
232    clear_ids_by_worker(ctx, worker_index);
233    cpu_self = _Thread_Dispatch_disable();
234    _SMP_Multicast_action(NULL, action, &ctx->id[worker_index][0]);
235    _Thread_Dispatch_enable(cpu_self);
236  }
237}
238
/* Parallel job fini: nothing to clean up. */
static void test_broadcast_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  /* Do nothing */
}
247
/* One cascaded parallel job which stresses broadcasts from all workers */
static const rtems_test_parallel_job test_jobs[] = {
  {
    .init = test_broadcast_init,
    .body = test_broadcast_body,
    .fini = test_broadcast_fini,
    .cascade = true
  }
};
256
/* Run the broadcast stress job via the parallel test support. */
T_TEST_CASE(ParallelBroadcast)
{
  rtems_test_parallel(
    &test_instance.base,
    NULL,
    &test_jobs[0],
    RTEMS_ARRAY_SIZE(test_jobs)
  );
}
266
/*
 * Exercise unicast and broadcast actions before multitasking is started,
 * each in three variants: plain, with interrupts disabled, and with thread
 * dispatching disabled.
 */
static void test_before_multitasking(void)
{
  test_context *ctx;

  ctx = &test_instance;

  T_case_begin("UnicastBeforeMultitasking", NULL);
  test_unicast(ctx, _SMP_Multicast_action);
  T_case_end();

  T_case_begin("UnicastBeforeMultitaskingIRQDisabled", NULL);
  test_unicast(ctx, multicast_action_irq_disabled);
  T_case_end();

  T_case_begin("UnicastBeforeMultitaskingDispatchDisabled", NULL);
  test_unicast(ctx, multicast_action_dispatch_disabled);
  T_case_end();

  T_case_begin("BroadcastBeforeMultitasking", NULL);
  test_broadcast(ctx, _SMP_Broadcast_action);
  T_case_end();

  T_case_begin("BroadcastBeforeMultitaskingIRQDisabled", NULL);
  test_broadcast(ctx, broadcast_action_irq_disabled);
  T_case_end();

  T_case_begin("BroadcastBeforeMultitaskingDispatchDisabled", NULL);
  test_broadcast(ctx, broadcast_action_dispatch_disabled);
  T_case_end();
}
297
/*
 * Run the pre-multitasking cases right after device driver initialization,
 * hooked in via a system initialization item below.
 */
static void after_drivers(void)
{
  TEST_BEGIN();
  T_run_initialize(&config);
  test_before_multitasking();
}

/* Register after_drivers() last in the device driver initialization step */
RTEMS_SYSINIT_ITEM(
  after_drivers,
  RTEMS_SYSINIT_DEVICE_DRIVERS,
  RTEMS_SYSINIT_ORDER_LAST
);
310
311static void set_wrong_cpu_state(void *arg)
312{
313  Per_CPU_Control *cpu_self;
314
315  cpu_self = arg;
316  T_step_eq_ptr(0, cpu_self, _Per_CPU_Get());
317  cpu_self->state = 123;
318
319  while (true) {
320    /* Do nothing */
321  }
322}
323
/*
 * Target another processor with a job which corrupts that processor's state.
 * The test is expected to terminate through a fatal error handled by
 * fatal_extension() (steps 1 to 3 of the plan; step 0 is performed by
 * set_wrong_cpu_state()), so control should never reach the final
 * rtems_fatal() call.
 */
static void test_wrong_cpu_state_to_perform_jobs(void)
{
  Per_CPU_Control *cpu_self;
  rtems_interrupt_level level;
  Processor_mask targets;
  uint32_t cpu_index;

  T_case_begin("WrongCPUStateToPerformJobs", NULL);
  T_plan(4);
  cpu_self = _Thread_Dispatch_disable();

  /* Pick the next processor after the executing one (wrapping around) */
  cpu_index = _Per_CPU_Get_index(cpu_self);
  cpu_index = (cpu_index + 1) % rtems_scheduler_get_processor_maximum();
  _Processor_mask_Zero(&targets);
  _Processor_mask_Set(&targets, cpu_index);

  rtems_interrupt_local_disable(level);

  _SMP_Multicast_action(
    &targets,
    set_wrong_cpu_state,
    _Per_CPU_Get_by_index(cpu_index)
  );

  /* If everything is all right, we don't end up here */
  rtems_interrupt_local_enable(level);
  _Thread_Dispatch_enable(cpu_self);
  rtems_fatal(RTEMS_FATAL_SOURCE_APPLICATION, 0);
}
353
#define TEST_JOB_ORDER_JOBS 3

static Per_CPU_Job job_order_jobs[TEST_JOB_ORDER_JOBS];

/* Handlers for the JobOrder case: each records its expected FIFO position */
static void job_order_handler_0(void *arg)
{
  T_step(1, "invalid job order");
}

static void job_order_handler_1(void *arg)
{
  T_step(2, "invalid job order");
}

static void job_order_handler_2(void *arg)
{
  T_step(3, "invalid job order");
}

static const Per_CPU_Job_context job_order_contexts[TEST_JOB_ORDER_JOBS] = {
  { .handler = job_order_handler_0 },
  { .handler = job_order_handler_1 },
  { .handler = job_order_handler_2 }
};
378
/*
 * Add three jobs for the executing processor and check that their handlers
 * run in registration order (steps 1 to 3) after step 0, once the
 * SMP_MESSAGE_PERFORM_JOBS message is processed.
 */
T_TEST_CASE(JobOrder)
{
  Per_CPU_Control *cpu_self;
  size_t i;

  T_plan(4);
  cpu_self = _Thread_Dispatch_disable();

  for (i = 0; i < TEST_JOB_ORDER_JOBS; ++i) {
    job_order_jobs[i].context = &job_order_contexts[i];
    _Per_CPU_Add_job(cpu_self, &job_order_jobs[i]);
  }

  /* Must execute before any of the job handlers */
  T_step(0, "wrong job processing time");
  _SMP_Send_message(_Per_CPU_Get_index(cpu_self), SMP_MESSAGE_PERFORM_JOBS);
  _Thread_Dispatch_enable(cpu_self);
}
396
#define TEST_ADD_JOB_IN_JOB_JOBS 3

static Per_CPU_Job add_job_in_job_jobs[TEST_ADD_JOB_IN_JOB_JOBS];

/* First handler: adds the second job from within a job handler */
static void add_job_in_job_handler_0(void *arg)
{
  T_step(1, "invalid job order");
  _Per_CPU_Add_job(_Per_CPU_Get(), &add_job_in_job_jobs[1]);
}

static void add_job_in_job_handler_1(void *arg)
{
  T_step(3, "invalid job order");
}

/*
 * NOTE(review): only two of the TEST_ADD_JOB_IN_JOB_JOBS (3) contexts carry
 * a handler; the third element is zero-initialized.  The AddJobInJob case
 * only ever adds jobs[0] and jobs[1] to a processing list, so the NULL
 * handler is never invoked — confirm the array size is intentional.
 */
static const Per_CPU_Job_context
add_job_in_job_contexts[TEST_ADD_JOB_IN_JOB_JOBS] = {
  { .handler = add_job_in_job_handler_0 },
  { .handler = add_job_in_job_handler_1 }
};
417
/*
 * Check that a job added from within a job handler (handler 0 adds job 1) is
 * processed with the next SMP_MESSAGE_PERFORM_JOBS message rather than the
 * current batch: step 2 executes between the two handlers (steps 1 and 3).
 */
T_TEST_CASE(AddJobInJob)
{
  Per_CPU_Control *cpu_self;
  size_t i;

  T_plan(4);
  cpu_self = _Thread_Dispatch_disable();

  for (i = 0; i < TEST_ADD_JOB_IN_JOB_JOBS; ++i) {
    add_job_in_job_jobs[i].context = &add_job_in_job_contexts[i];
  }

  _Per_CPU_Add_job(cpu_self, &add_job_in_job_jobs[0]);
  T_step(0, "wrong job processing time");
  _SMP_Send_message(_Per_CPU_Get_index(cpu_self), SMP_MESSAGE_PERFORM_JOBS);
  T_step(2, "wrong job processing time");
  _SMP_Send_message(_Per_CPU_Get_index(cpu_self), SMP_MESSAGE_PERFORM_JOBS);
  _Thread_Dispatch_enable(cpu_self);
}
437
/*
 * Repeat the unicast and broadcast checks during multitasking, in the
 * IRQ-disabled and dispatch-disabled variants.
 */
T_TEST_CASE(UnicastDuringMultitaskingIRQDisabled)
{
  test_unicast(&test_instance, multicast_action_irq_disabled);
}

T_TEST_CASE(UnicastDuringMultitaskingDispatchDisabled)
{
  test_unicast(&test_instance, multicast_action_dispatch_disabled);
}

T_TEST_CASE(BroadcastDuringMultitaskingIRQDisabled)
{
  test_broadcast(&test_instance, broadcast_action_irq_disabled);
}

T_TEST_CASE(BroadcastDuringMultitaskingDispatchDisabled)
{
  test_broadcast(&test_instance, broadcast_action_dispatch_disabled);
}
457
/*
 * Initialization task: run all registered test cases, then provoke the
 * wrong-CPU-state fatal error (requires at least two processors).  With only
 * one processor the test ends through the application fatal error path in
 * fatal_extension().
 */
static void Init(rtems_task_argument arg)
{
  T_register();
  T_run_all();

  if (rtems_scheduler_get_processor_maximum() > 1) {
    test_wrong_cpu_state_to_perform_jobs();
  } else {
    rtems_fatal(RTEMS_FATAL_SOURCE_APPLICATION, 0);
  }
}
469
/*
 * Fatal error extension terminating the test.  An RTEMS_FATAL_SOURCE_SMP
 * error is expected from the WrongCPUStateToPerformJobs case and performs
 * steps 1 to 3 of its plan; an RTEMS_FATAL_SOURCE_APPLICATION error ends the
 * test without further checks (single-processor path in Init()).
 */
static void fatal_extension(
  rtems_fatal_source source,
  bool always_set_to_false,
  rtems_fatal_code code
)
{
  bool ok;

  if (source == RTEMS_FATAL_SOURCE_SMP) {
    T_step_eq_int(1, source, RTEMS_FATAL_SOURCE_SMP);
    T_step_false(2, always_set_to_false, "unexpected argument value");
    T_step_eq_int(3, code, SMP_FATAL_WRONG_CPU_STATE_TO_PERFORM_JOBS);
    T_case_end();

    ok = T_run_finalize();
    rtems_test_assert(ok);
    TEST_END();
  } else if (source == RTEMS_FATAL_SOURCE_APPLICATION) {
    ok = T_run_finalize();
    rtems_test_assert(ok);
    TEST_END();
  }
}
493
494#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
495
496#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT
497
498#define CONFIGURE_MAXIMUM_TIMERS 1
499
500#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT
501
502#define CONFIGURE_INITIAL_EXTENSIONS \
503  { .fatal = fatal_extension }, \
504  RTEMS_TEST_INITIAL_EXTENSION
505
506#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
507
508#define CONFIGURE_INIT
509
510#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.