source: rtems/testsuites/smptests/smpschededf02/init.c @ b89b442

Last change on this file was b89b442, checked in by Sebastian Huber <sebastian.huber@…> on 02/14/20 at 14:51:01

smpschededf02: Improve readability

/*
 * Copyright (c) 2016, 2020 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <tmacros.h>

#include <rtems.h>

const char rtems_test_name[] = "SMPSCHEDEDF 2";

#define CPU_COUNT 2

#define TASK_COUNT 5

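/*
 * P(i) yields the task priority used for task index i (two plus the index).
 * A(cpu0, cpu1) builds a two-bit processor affinity mask in which bit 0
 * selects processor 0 and bit 1 selects processor 1, e.g. A(1, 0) restricts a
 * task to processor 0 and A(1, 1) allows both processors.
 */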
#define P(i) (UINT32_C(2) + i)

#define A(cpu0, cpu1) ((cpu1 << 1) | cpu0)

typedef enum {
  T0,
  T1,
  T2,
  T3,
  T4,
  IDLE
} task_index;

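/*
 * One test action: the kind of scheduler operation, the index of the affected
 * task, the operation data (new priority or affinity mask), and the task (or
 * IDLE) expected to be the heir of each processor after the operation.
 */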
typedef struct {
  enum {
    KIND_RESET,
    KIND_SET_PRIORITY,
    KIND_SET_AFFINITY,
    KIND_BLOCK,
    KIND_UNBLOCK
  } kind;

  task_index index;

  struct {
    rtems_task_priority priority;
    uint32_t cpu_set;
  } data;

  uint8_t expected_cpu_allocations[CPU_COUNT];
} test_action;

typedef struct {
  rtems_id timer_id;
  rtems_id master_id;
  rtems_id task_ids[TASK_COUNT];
  size_t action_index;
} test_context;

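/* Convenience initializers for entries of the test action table below */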
#define RESET \
  { \
    KIND_RESET, \
    0, \
    { 0 }, \
    { IDLE, IDLE } \
  }

#define SET_PRIORITY(index, prio, cpu0, cpu1) \
  { \
    KIND_SET_PRIORITY, \
    index, \
    { .priority = prio }, \
    { cpu0, cpu1 } \
  }

#define SET_AFFINITY(index, aff, cpu0, cpu1) \
  { \
    KIND_SET_AFFINITY, \
    index, \
    { .cpu_set = aff }, \
    { cpu0, cpu1 } \
  }

#define BLOCK(index, cpu0, cpu1) \
  { \
    KIND_BLOCK, \
    index, \
    { 0 }, \
    { cpu0, cpu1 } \
  }

#define UNBLOCK(index, cpu0, cpu1) \
  { \
    KIND_UNBLOCK, \
    index, \
    { 0 }, \
    { cpu0, cpu1 } \
  }

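/*
 * For each action below, the last two columns give the expected heir of
 * processor 0 and of processor 1 after the action has been carried out.  A
 * RESET action always expects both processors to be idle.
 */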
static const test_action test_actions[] = {
  RESET,
  UNBLOCK(      T0,             T0, IDLE),
  UNBLOCK(      T1,             T0,   T1),
  UNBLOCK(      T3,             T0,   T1),
  SET_PRIORITY( T1,  P(2),      T0,   T1),
  SET_PRIORITY( T3,  P(1),      T0,   T3),
  BLOCK(        T3,             T0,   T1),
  SET_AFFINITY( T1,  A(1, 1),   T0,   T1),
  SET_AFFINITY( T1,  A(1, 0),   T1,   T0),
  SET_AFFINITY( T1,  A(1, 1),   T1,   T0),
  SET_AFFINITY( T1,  A(1, 0),   T1,   T0),
  SET_AFFINITY( T1,  A(0, 1),   T0,   T1),
  BLOCK(        T0,           IDLE,   T1),
  UNBLOCK(      T0,             T0,   T1),
  BLOCK(        T1,             T0, IDLE),
  UNBLOCK(      T1,             T0,   T1),
  /*
   * Show that FIFO order is honoured across all threads of the same priority.
   */
  RESET,
  SET_PRIORITY( T1,  P(0),    IDLE, IDLE),
  SET_PRIORITY( T2,  P(1),    IDLE, IDLE),
  SET_PRIORITY( T3,  P(1),    IDLE, IDLE),
  SET_AFFINITY( T3,  A(1, 0), IDLE, IDLE),
  SET_PRIORITY( T4,  P(1),    IDLE, IDLE),
  SET_AFFINITY( T4,  A(1, 0), IDLE, IDLE),
  UNBLOCK(      T0,             T0, IDLE),
  UNBLOCK(      T1,             T0,   T1),
  UNBLOCK(      T2,             T0,   T1),
  UNBLOCK(      T3,             T0,   T1),
  UNBLOCK(      T4,             T0,   T1),
  BLOCK(        T1,             T0,   T2),
  BLOCK(        T2,             T3,   T0),
  BLOCK(        T3,             T4,   T0),
  /*
   * Schedule a high priority affine thread directly with a low priority affine
   * thread in the corresponding ready queue.  In this case, we remove the
   * affine ready queue in _Scheduler_EDF_SMP_Allocate_processor().
   */
  RESET,
  UNBLOCK(      T0,             T0, IDLE),
  UNBLOCK(      T1,             T0,   T1),
  SET_PRIORITY( T1,  P(2),      T0,   T1),
  SET_AFFINITY( T3,  A(0, 1),   T0,   T1),
  UNBLOCK(      T3,             T0,   T1),
  SET_PRIORITY( T2,  P(1),      T0,   T1),
  SET_AFFINITY( T2,  A(0, 1),   T0,   T1),
  UNBLOCK(      T2,             T0,   T2),
  BLOCK(        T1,             T0,   T2),
  BLOCK(        T2,             T0,   T3),
  /* Force migration of a higher priority one-to-all thread */
  RESET,
  UNBLOCK(      T0,             T0, IDLE),
  SET_AFFINITY( T1,  A(1, 0),   T0, IDLE),
  UNBLOCK(      T1,             T1,   T0),
  /*
   * Block a one-to-one thread while having a non-empty affine ready queue on
   * the same processor.
   */
  RESET,
  SET_AFFINITY( T1,  A(1, 0), IDLE, IDLE),
  SET_AFFINITY( T3,  A(1, 0), IDLE, IDLE),
  UNBLOCK(      T0,             T0, IDLE),
  UNBLOCK(      T1,             T1,   T0),
  UNBLOCK(      T2,             T1,   T0),
  UNBLOCK(      T3,             T1,   T0),
  BLOCK(        T1,             T2,   T0),
  BLOCK(        T0,             T3,   T2),
  /*
   * Make sure that a one-to-one thread does not get the wrong processor
   * allocated after selecting the highest ready thread.
   */
  RESET,
  SET_AFFINITY( T1,  A(1, 0), IDLE, IDLE),
  SET_AFFINITY( T2,  A(1, 0), IDLE, IDLE),
  UNBLOCK(      T0,             T0, IDLE),
  UNBLOCK(      T1,             T1,   T0),
  UNBLOCK(      T2,             T1,   T0),
  BLOCK(        T0,             T1, IDLE),
  RESET
};

static test_context test_instance;

static void set_priority(rtems_id id, rtems_task_priority prio)
{
  rtems_status_code sc;

  sc = rtems_task_set_priority(id, prio, &prio);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

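/*
 * Convert the compact affinity mask produced by A() into a cpu_set_t and
 * apply it to the task.
 */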
static void set_affinity(rtems_id id, uint32_t cpu_set_32)
{
  rtems_status_code sc;
  cpu_set_t cpu_set;
  size_t i;

  CPU_ZERO(&cpu_set);

  for (i = 0; i < CPU_COUNT; ++i) {
    if ((cpu_set_32 & (UINT32_C(1) << i)) != 0) {
      CPU_SET(i, &cpu_set);
    }
  }

  sc = rtems_task_set_affinity(id, sizeof(cpu_set), &cpu_set);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

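/*
 * Bring the scheduler back to a defined state: restore the default priorities
 * and affinities, suspend all worker tasks, and leave an idle thread as the
 * heir on each processor (the allocation expected by the RESET action).
 */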
static void reset(test_context *ctx)
{
  rtems_status_code sc;
  size_t i;

  for (i = 0; i < TASK_COUNT; ++i) {
    set_priority(ctx->task_ids[i], P(i));
    set_affinity(ctx->task_ids[i], A(1, 1));
  }

  for (i = CPU_COUNT; i < TASK_COUNT; ++i) {
    sc = rtems_task_suspend(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_ALREADY_SUSPENDED);
  }

  for (i = 0; i < CPU_COUNT; ++i) {
    sc = rtems_task_resume(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_INCORRECT_STATE);
  }

  /* Order the idle threads explicitly */
  for (i = 0; i < CPU_COUNT; ++i) {
    const Per_CPU_Control *c;
    const Thread_Control *h;

    c = _Per_CPU_Get_by_index(CPU_COUNT - 1 - i);
    h = c->heir;

    sc = rtems_task_suspend(h->Object.id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }
}

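/*
 * Check that the heir of each processor is the task expected by the action,
 * or an idle thread if the expected allocation is IDLE.
 */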
static void check_cpu_allocations(test_context *ctx, const test_action *action)
{
  size_t i;

  for (i = 0; i < CPU_COUNT; ++i) {
    task_index e;
    const Per_CPU_Control *c;
    const Thread_Control *h;

    e = action->expected_cpu_allocations[i];
    c = _Per_CPU_Get_by_index(i);
    h = c->heir;

    if (e != IDLE) {
      rtems_test_assert(h->Object.id == ctx->task_ids[e]);
    } else {
      rtems_test_assert(h->is_idle);
    }
  }
}

/*
 * Use a timer to execute the actions, since it runs with thread dispatching
 * disabled.  This is necessary to check the expected processor allocations.
 */
static void timer(rtems_id id, void *arg)
{
  test_context *ctx;
  rtems_status_code sc;
  size_t i;

  ctx = arg;
  i = ctx->action_index;

  if (i == 0) {
    sc = rtems_task_suspend(ctx->master_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  if (i < RTEMS_ARRAY_SIZE(test_actions)) {
    const test_action *action = &test_actions[i];
    rtems_id task;

    ctx->action_index = i + 1;

    task = ctx->task_ids[action->index];

    switch (action->kind) {
      case KIND_SET_PRIORITY:
        set_priority(task, action->data.priority);
        break;
      case KIND_SET_AFFINITY:
        set_affinity(task, action->data.cpu_set);
        break;
      case KIND_BLOCK:
        sc = rtems_task_suspend(task);
        rtems_test_assert(sc == RTEMS_SUCCESSFUL);
        break;
      case KIND_UNBLOCK:
        sc = rtems_task_resume(task);
        rtems_test_assert(sc == RTEMS_SUCCESSFUL);
        break;
      default:
        rtems_test_assert(action->kind == KIND_RESET);
        reset(ctx);
        break;
    }

    check_cpu_allocations(ctx, action);

    sc = rtems_timer_reset(id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  } else {
    sc = rtems_task_resume(ctx->master_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_send(ctx->master_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }
}

static void do_nothing_task(rtems_task_argument arg)
{
  (void) arg;

  while (true) {
    /* Do nothing */
  }
}

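/*
 * Create and start the worker tasks, create the action timer, and then wait
 * until the timer service routine has carried out all test actions and sent
 * the transient event.
 */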
static void test(void)
{
  test_context *ctx;
  rtems_status_code sc;
  size_t i;

  ctx = &test_instance;

  ctx->master_id = rtems_task_self();

  for (i = 0; i < TASK_COUNT; ++i) {
    sc = rtems_task_create(
      rtems_build_name(' ', ' ', 'T', '0' + i),
      P(i),
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->task_ids[i]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(ctx->task_ids[i], do_nothing_task, 0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_timer_create(
    rtems_build_name('A', 'C', 'T', 'N'),
    &ctx->timer_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_timer_fire_after(ctx->timer_id, 1, timer, ctx);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  for (i = 0; i < TASK_COUNT; ++i) {
    sc = rtems_task_delete(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_timer_delete(ctx->timer_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

static void Init(rtems_task_argument arg)
{
  TEST_BEGIN();

  if (rtems_scheduler_get_processor_maximum() == CPU_COUNT) {
    test();
  } else {
    puts("warning: wrong processor count to run the test");
  }

  TEST_END();
  rtems_test_exit(0);
}

#define CONFIGURE_MICROSECONDS_PER_TICK 1000

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER

#define CONFIGURE_MAXIMUM_TASKS (1 + TASK_COUNT)
#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_SCHEDULER_EDF_SMP

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>