source: rtems/testsuites/smptests/smpipi01/init.c @ f9219db

5
Last change on this file since f9219db was f9219db, checked in by Sebastian Huber <sebastian.huber@…>, on 04/05/19 at 06:16:05

rtems: Add rtems_scheduler_get_processor_maximum()

Add rtems_scheduler_get_processor_maximum() as a replacement for
rtems_get_processor_count(). The rtems_get_processor_count() function is a bit
of an orphan. Adopt it into the Scheduler Manager. The count is also
misleading, since the processor set may have gaps and the actual count
of online processors may be less than the value returned by
rtems_get_processor_count().

Update #3732.

  • Property mode set to 100644
File size: 4.7 KB
Line 
1/*
2 * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#ifdef HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#include <rtems/score/smpimpl.h>
20#include <rtems/score/smpbarrier.h>
21#include <rtems/counter.h>
22#include <rtems.h>
23
24#include <stdio.h>
25
26#include "tmacros.h"
27
/* Test name reported by the RTEMS test framework (tmacros.h). */
const char rtems_test_name[] = "SMPIPI 1";

/* Maximum number of processors this test is configured for. */
#define CPU_COUNT 32
31
/*
 * Per-processor IPI invocation counter.  The trailing padding array
 * (31 extra uint32_t, 128 bytes total per element) presumably keeps each
 * counter on its own cache line to avoid false sharing between
 * processors -- TODO confirm against the target's cache-line size.
 */
typedef struct {
  uint32_t value;
  uint32_t cache_line_separation[31];
} test_counter;
36
/* Shared state for the whole test. */
typedef struct {
  /* One IPI handler-invocation counter per processor. */
  test_counter counters[CPU_COUNT];
  /* Snapshots of the counters taken before each message flood. */
  uint32_t copy_counters[CPU_COUNT];
  /* Two-party barrier used to synchronize main task and IPI handler. */
  SMP_barrier_Control barrier;
  /* Barrier state used by the main (initiating) task. */
  SMP_barrier_State main_barrier_state;
  /* Barrier state used inside the IPI handler on the target processor. */
  SMP_barrier_State worker_barrier_state;
} test_context;
44
/* Single global test context; counters start zeroed by static storage. */
static test_context test_instance = {
  .barrier = SMP_BARRIER_CONTROL_INITIALIZER,
  .main_barrier_state = SMP_BARRIER_STATE_INITIALIZER,
  .worker_barrier_state = SMP_BARRIER_STATE_INITIALIZER
};
50
51static void barrier(
52  test_context *ctx,
53  SMP_barrier_State *state
54)
55{
56  _SMP_barrier_Wait(&ctx->barrier, state, 2);
57}
58
59static void barrier_handler(Per_CPU_Control *cpu_self)
60{
61  test_context *ctx = &test_instance;
62  uint32_t cpu_index_self = _Per_CPU_Get_index(cpu_self);
63  SMP_barrier_State *bs = &ctx->worker_barrier_state;
64
65  ++ctx->counters[cpu_index_self].value;
66
67  /* (A) */
68  barrier(ctx, bs);
69
70  /* (B) */
71  barrier(ctx, bs);
72
73  /* (C) */
74  barrier(ctx, bs);
75}
76
/*
 * Verify that a test message sent to a processor while its handler is
 * still processing a previous message is queued and processed exactly
 * once afterwards, not lost and not processed twice.
 *
 * Protocol per target processor (barriers pair with barrier_handler()):
 *   - send first message; at (A) the handler has incremented the counter,
 *     so it must read 1;
 *   - send a second message while the handler is still inside its first
 *     invocation (between (A) and (B));
 *   - at (B) the counter must still be 1: the pending second message has
 *     not been handled yet;
 *   - after (C) the first invocation returns and the pending message
 *     triggers a second invocation, observed via the (A)/(B)/(C) sequence
 *     of that second invocation, where the counter must read 2.
 */
static void test_send_message_while_processing_a_message(
  test_context *ctx
)
{
  uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
  uint32_t cpu_index_self = rtems_scheduler_get_processor();
  uint32_t cpu_index;
  SMP_barrier_State *bs = &ctx->main_barrier_state;

  _SMP_Set_test_message_handler(barrier_handler);

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    if (cpu_index != cpu_index_self) {
      _SMP_Send_message(cpu_index, SMP_MESSAGE_TEST);

      /* (A) */
      barrier(ctx, bs);

      rtems_test_assert(ctx->counters[cpu_index].value == 1);
      /* Second message arrives while the first is still being handled */
      _SMP_Send_message(cpu_index, SMP_MESSAGE_TEST);

      /* (B) */
      barrier(ctx, bs);

      /* Still 1: the second message is pending, not yet processed */
      rtems_test_assert(ctx->counters[cpu_index].value == 1);

      /* (C) */
      barrier(ctx, bs);

      /* (A) of the second handler invocation */
      barrier(ctx, bs);

      rtems_test_assert(ctx->counters[cpu_index].value == 2);

      /* (B) */
      barrier(ctx, bs);

      /* (C) */
      barrier(ctx, bs);

      /* Reset for the flood test that follows */
      ctx->counters[cpu_index].value = 0;
    }
  }
}
121
122static void counter_handler(Per_CPU_Control *cpu_self)
123{
124  test_context *ctx = &test_instance;
125  uint32_t cpu_index_self = _Per_CPU_Get_index(cpu_self);
126
127  ++ctx->counters[cpu_index_self].value;
128}
129
/*
 * Flood each processor in turn with 100000 test messages and verify
 * that only the targeted processor's counter changes, i.e. message
 * delivery does not disturb other processors.  Afterwards print the
 * per-processor invocation counts and check the online processor set
 * matches the scheduler's processor maximum.
 */
static void test_send_message_flood(
  test_context *ctx
)
{
  uint32_t cpu_count = rtems_scheduler_get_processor_maximum();
  uint32_t cpu_index_self = rtems_scheduler_get_processor();
  uint32_t cpu_index;

  _SMP_Set_test_message_handler(counter_handler);

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    uint32_t i;

    /* Wait 1ms so that all outstanding messages have been processed */
    rtems_counter_delay_nanoseconds(1000000);

    /* Snapshot every other processor's counter before the flood */
    for (i = 0; i < cpu_count; ++i) {
      if (i != cpu_index) {
        ctx->copy_counters[i] = ctx->counters[i].value;
      }
    }

    /* Flood the target processor (may be the executing one) */
    for (i = 0; i < 100000; ++i) {
      _SMP_Send_message(cpu_index, SMP_MESSAGE_TEST);
    }

    /* Non-targeted processors must be unaffected by the flood */
    for (i = 0; i < cpu_count; ++i) {
      if (i != cpu_index) {
        rtems_test_assert(ctx->copy_counters[i] == ctx->counters[i].value);
      }
    }
  }

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    rtems_test_assert(
      _Processor_mask_Is_set(_SMP_Get_online_processors(), cpu_index)
    );

    printf(
      "inter-processor interrupts for processor %"
        PRIu32 "%s: %" PRIu32 "\n",
      cpu_index,
      cpu_index == cpu_index_self ? " (main)" : "",
      ctx->counters[cpu_index].value
    );
  }

  /* Processors beyond the scheduler maximum must not be online */
  for (; cpu_index < CPU_COUNT; ++cpu_index) {
    rtems_test_assert(
      !_Processor_mask_Is_set(_SMP_Get_online_processors(), cpu_index)
    );
  }
}
183
184static void test(void)
185{
186  test_context *ctx = &test_instance;
187
188  test_send_message_while_processing_a_message(ctx);
189  test_send_message_flood(ctx);
190}
191
/* RTEMS initialization task entry point; arg is unused. */
static void Init(rtems_task_argument arg)
{
  TEST_BEGIN();

  test();

  TEST_END();
  rtems_test_exit(0);
}
201
/* Application configuration for <rtems/confdefs.h>. */

/* The test uses barriers and busy delays, not the clock tick. */
#define CONFIGURE_APPLICATION_DOES_NOT_NEED_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER

/* Allow up to CPU_COUNT processors; fewer may actually be online. */
#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

/* Only the Init task exists. */
#define CONFIGURE_MAXIMUM_TASKS 1

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.