source: rtems/testsuites/smptests/smpipi01/init.c @ 577293f0

Last change on this file was 577293f0, checked in by Sebastian Huber <sebastian.huber@…> on 04/28/19 at 12:31:10

score: Add _SMP_Synchronize()

/*
 * Copyright (c) 2014, 2019 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/smpimpl.h>
#include <rtems/score/smpbarrier.h>
#include <rtems.h>

#include <stdio.h>

#include "tmacros.h"

const char rtems_test_name[] = "SMPIPI 1";

#define CPU_COUNT 32

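/*
 * Each counter is padded to 32 * sizeof(uint32_t) == 128 bytes so that the
 * counters of different processors end up on separate cache lines.  This
 * avoids false sharing while the processors update their counters
 * concurrently (assuming a cache line size of at most 128 bytes).
 */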
typedef struct {
  uint32_t value;
  uint32_t cache_line_separation[31];
} test_counter;

typedef struct {
  test_counter counters[CPU_COUNT];
  uint32_t copy_counters[CPU_COUNT];
  SMP_barrier_Control barrier;
  SMP_barrier_State main_barrier_state;
  SMP_barrier_State worker_barrier_state;
  Per_CPU_Job jobs[CPU_COUNT][2];
} test_context;

static test_context test_instance = {
  .barrier = SMP_BARRIER_CONTROL_INITIALIZER,
  .main_barrier_state = SMP_BARRIER_STATE_INITIALIZER,
  .worker_barrier_state = SMP_BARRIER_STATE_INITIALIZER
};

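/*
 * A two-party rendezvous between the main processor and the worker processor
 * currently under test: _SMP_barrier_Wait() releases both participants once
 * the second one has arrived.
 */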
static void barrier(
  test_context *ctx,
  SMP_barrier_State *state
)
{
  _SMP_barrier_Wait(&ctx->barrier, state, 2);
}

static void barrier_1_handler(void *arg)
{
  test_context *ctx = arg;
  uint32_t cpu_index_self = _SMP_Get_current_processor();
  SMP_barrier_State *bs = &ctx->worker_barrier_state;

  ++ctx->counters[cpu_index_self].value;

  /* (D) */
  barrier(ctx, bs);
}

static const Per_CPU_Job_context barrier_1_job_context = {
  .handler = barrier_1_handler,
  .arg = &test_instance
};

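/*
 * Runs on the worker processor in interrupt context.  While still processing
 * the first job, it queues the second job (barrier_1_handler) on its own
 * processor at (C).  The second SMP message, sent by the main processor
 * between (A) and (B) while this handler is in progress, must make the
 * worker process the newly added job after this handler returns.
 */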
static void barrier_0_handler(void *arg)
{
  test_context *ctx = arg;
  uint32_t cpu_index_self = _SMP_Get_current_processor();
  SMP_barrier_State *bs = &ctx->worker_barrier_state;

  ++ctx->counters[cpu_index_self].value;

  /* (A) */
  barrier(ctx, bs);

  /* (B) */
  barrier(ctx, bs);

  /* (C) */
  barrier(ctx, bs);

  ctx->jobs[0][1].context = &barrier_1_job_context;
  _Per_CPU_Add_job(_Per_CPU_Get(), &ctx->jobs[0][1]);
}

static const Per_CPU_Job_context barrier_0_job_context = {
  .handler = barrier_0_handler,
  .arg = &test_instance
};

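/*
 * Pins the executing task to cpu_index_self, then, for each other online
 * processor, queues barrier_0_handler as a job and sends
 * SMP_MESSAGE_PERFORM_JOBS.  A second message is sent between (A) and (B),
 * while the worker is still inside the handler; the counter checks after
 * (B) and (D) verify that the job queued at (C) is processed exactly once,
 * as a consequence of that pending second message.
 */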
static void test_send_message_while_processing_a_message(
  test_context *ctx,
  uint32_t cpu_index_self,
  uint32_t cpu_count
)
{
  SMP_barrier_State *bs = &ctx->main_barrier_state;
  uint32_t cpu_index;
  rtems_status_code sc;
  cpu_set_t cpuset;

  rtems_test_assert(cpu_index_self < CPU_SETSIZE);
  CPU_ZERO(&cpuset);
  CPU_SET((int) cpu_index_self, &cpuset);
  sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    if (cpu_index != cpu_index_self) {
      ctx->jobs[0][0].context = &barrier_0_job_context;
      _Per_CPU_Add_job(_Per_CPU_Get_by_index(cpu_index), &ctx->jobs[0][0]);
      _SMP_Send_message(cpu_index, SMP_MESSAGE_PERFORM_JOBS);

      /* (A) */
      barrier(ctx, bs);

      rtems_test_assert(ctx->counters[cpu_index].value == 1);
      _SMP_Send_message(cpu_index, SMP_MESSAGE_PERFORM_JOBS);

      /* (B) */
      barrier(ctx, bs);

      rtems_test_assert(ctx->counters[cpu_index].value == 1);

      /* (C) */
      barrier(ctx, bs);

      /* (D) */
      barrier(ctx, bs);

      rtems_test_assert(ctx->counters[cpu_index].value == 2);

      ctx->counters[cpu_index].value = 0;
    }
  }
}

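/*
 * The two counter jobs re-queue each other, so each processor always has
 * exactly one pending job.  Every SMP_MESSAGE_PERFORM_JOBS message therefore
 * executes exactly one job and increments the counter by one.
 */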
static void counter_handler(void *arg, size_t next_job)
{
  test_context *ctx = arg;
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  uint32_t cpu_index_self = _Per_CPU_Get_index(cpu_self);

  ++ctx->counters[cpu_index_self].value;
  _Per_CPU_Add_job(cpu_self, &ctx->jobs[cpu_index_self][next_job]);
}

static void counter_0_handler(void *arg)
{
  counter_handler(arg, 1);
}

static const Per_CPU_Job_context counter_0_job_context = {
  .handler = counter_0_handler,
  .arg = &test_instance
};

static void counter_1_handler(void *arg)
{
  counter_handler(arg, 0);
}

static const Per_CPU_Job_context counter_1_job_context = {
  .handler = counter_1_handler,
  .arg = &test_instance
};

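/*
 * Floods one processor at a time with SMP messages and checks that the
 * counters of all other processors stay unchanged, i.e. that messages are
 * delivered only to the addressed processor.
 */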
static void test_send_message_flood(
  test_context *ctx,
  uint32_t cpu_count
)
{
  uint32_t cpu_index_self = rtems_scheduler_get_processor();
  uint32_t cpu_index;

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index(cpu_index);

    ctx->jobs[cpu_index][0].context = &counter_0_job_context;
    ctx->jobs[cpu_index][1].context = &counter_1_job_context;
    _Per_CPU_Add_job(cpu, &ctx->jobs[cpu_index][0]);
    _SMP_Send_message(cpu_index, SMP_MESSAGE_PERFORM_JOBS);
  }

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    Per_CPU_Control *cpu_self;
    uint32_t i;

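    /*
     * _SMP_Synchronize() (introduced by this change) is used here to ensure
     * that the messages sent so far have been processed by the other
     * processors before their counter values are sampled below.
     */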
    cpu_self = _Thread_Dispatch_disable();
    _SMP_Synchronize();
    _Thread_Dispatch_enable(cpu_self);

    for (i = 0; i < cpu_count; ++i) {
      if (i != cpu_index) {
        ctx->copy_counters[i] = ctx->counters[i].value;
      }
    }

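    /*
     * Flood the target processor with messages.  Only its own counter may
     * advance; the snapshot taken above is compared against the live
     * counters of all other processors afterwards.
     */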
    for (i = 0; i < 100000; ++i) {
      _SMP_Send_message(cpu_index, SMP_MESSAGE_PERFORM_JOBS);
    }

    for (i = 0; i < cpu_count; ++i) {
      if (i != cpu_index) {
        rtems_test_assert(ctx->copy_counters[i] == ctx->counters[i].value);
      }
    }
  }

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    rtems_test_assert(
      _Processor_mask_Is_set(_SMP_Get_online_processors(), cpu_index)
    );

    printf(
      "inter-processor interrupts for processor %"
        PRIu32 "%s: %" PRIu32 "\n",
      cpu_index,
      cpu_index == cpu_index_self ? " (main)" : "",
      ctx->counters[cpu_index].value
    );
  }

  for (; cpu_index < CPU_COUNT; ++cpu_index) {
    rtems_test_assert(
      !_Processor_mask_Is_set(_SMP_Get_online_processors(), cpu_index)
    );
  }
}

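/*
 * Runs the message-while-processing scenario with each processor acting as
 * the main processor in turn, then runs the flood test.
 */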
static void test(void)
{
  test_context *ctx = &test_instance;
  uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
  uint32_t cpu_index_self;

  for (cpu_index_self = 0; cpu_index_self < cpu_count; ++cpu_index_self) {
    test_send_message_while_processing_a_message(ctx, cpu_index_self, cpu_count);
  }

  test_send_message_flood(ctx, cpu_count);
}

static void Init(rtems_task_argument arg)
{
  TEST_BEGIN();

  test();

  TEST_END();
  rtems_test_exit(0);
}

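/*
 * Application configuration: no clock driver is needed, up to CPU_COUNT
 * processors are configured, and a single initialization task runs the
 * test.
 */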
#define CONFIGURE_APPLICATION_DOES_NOT_NEED_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER

#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_MAXIMUM_TASKS 1

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>