source: rtems/testsuites/smptests/smpschededf02/init.c @ e0a9336b

Last change on this file since e0a9336b was e0a9336b, checked in by Sebastian Huber <sebastian.huber@…>, on 09/03/18 at 06:12:35

score: Fix EDF SMP scheduler

Fix a special case: block a one-to-one scheduled thread while having a
non-empty affine ready queue on the same processor.

  • Property mode set to 100644
File size: 9.9 KB
Line 
/*
 * Copyright (c) 2016, 2018 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

15#ifdef HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#include "tmacros.h"
20
21#include <rtems.h>
22
const char rtems_test_name[] = "SMPSCHEDEDF 2";

/* Number of processors the test is written for */
#define CPU_COUNT 2

/* Number of worker tasks */
#define TASK_COUNT 5

/*
 * Priority of task i.  The argument is parenthesized so that expression
 * arguments (e.g. a conditional) cannot change the intended grouping.
 */
#define P(i) (UINT32_C(2) + (i))

/*
 * Affinity bit mask built from per-processor flags: bit 0 selects
 * processor 0, bit 1 selects processor 1.  Arguments are parenthesized
 * to avoid operator-precedence surprises in expression arguments.
 */
#define A(cpu0, cpu1) (((cpu1) << 1) | (cpu0))

/* Marker for "idle thread expected on this processor" */
#define IDLE UINT8_C(255)

/* Object name shared by all tasks and the timer */
#define NAME rtems_build_name('E', 'D', 'F', ' ')
36
/*
 * One scripted scheduler operation together with the processor
 * allocations expected once the operation has been carried out.
 */
typedef struct {
  enum {
    KIND_RESET,
    KIND_SET_PRIORITY,
    KIND_SET_AFFINITY,
    KIND_BLOCK,
    KIND_UNBLOCK
  } kind;

  /* Index into test_context::task_ids of the task operated on */
  size_t index;

  /* Operation payload; which member is valid depends on kind */
  struct {
    rtems_task_priority priority;
    uint32_t cpu_set;
  } data;

  /* Expected heir per processor; IDLE means the idle thread */
  uint8_t expected_cpu_allocations[CPU_COUNT];
} test_action;

/* Shared state of the test */
typedef struct {
  rtems_id timer_id;
  rtems_id master_id;
  rtems_id task_ids[TASK_COUNT];
  /* Index of the next entry of test_actions to execute */
  size_t action_index;
} test_context;
62
/*
 * Action initializers for the test_actions table.  The trailing pair of
 * values is always the expected heir of processor 0 and processor 1
 * after the action (IDLE for the idle thread).
 */

/* Restore default priorities/affinities and park all but two tasks */
#define RESET \
  { \
    KIND_RESET, \
    0, \
    { 0 }, \
    { IDLE, IDLE } \
  }

/* Change the priority of task `index` to `prio` */
#define SET_PRIORITY(index, prio, cpu0, cpu1) \
  { \
    KIND_SET_PRIORITY, \
    index, \
    { .priority = prio }, \
    { cpu0, cpu1 } \
  }

/* Change the affinity of task `index` to the bit mask `aff` (see A()) */
#define SET_AFFINITY(index, aff, cpu0, cpu1) \
  { \
    KIND_SET_AFFINITY, \
    index, \
    { .cpu_set = aff }, \
    { cpu0, cpu1 } \
  }

/* Suspend task `index` */
#define BLOCK(index, cpu0, cpu1) \
  { \
    KIND_BLOCK, \
    index, \
    { 0 }, \
    { cpu0, cpu1 } \
  }

/* Resume task `index` */
#define UNBLOCK(index, cpu0, cpu1) \
  { \
    KIND_UNBLOCK, \
    index, \
    { 0 }, \
    { cpu0, cpu1 } \
  }
102
/*
 * The scripted scheduler scenario.  The entries are executed in order by
 * the timer() service routine; after each one the heirs of both
 * processors are compared against the expected allocations.
 */
static const test_action test_actions[] = {
  RESET,
  UNBLOCK(      0,              0, IDLE),
  UNBLOCK(      1,              0,    1),
  UNBLOCK(      3,              0,    1),
  SET_PRIORITY( 1,  P(2),       0,    1),
  SET_PRIORITY( 3,  P(1),       0,    3),
  BLOCK(        3,              0,    1),
  SET_AFFINITY( 1,  A(1, 1),    0,    1),
  SET_AFFINITY( 1,  A(1, 0),    1,    0),
  SET_AFFINITY( 1,  A(1, 1),    1,    0),
  SET_AFFINITY( 1,  A(1, 0),    1,    0),
  SET_AFFINITY( 1,  A(0, 1),    0,    1),
  BLOCK(        0,           IDLE,    1),
  UNBLOCK(      0,              0,    1),
  BLOCK(        1,              0, IDLE),
  UNBLOCK(      1,              0,    1),
  /*
   * Show that FIFO order is honoured across all threads of the same priority.
   */
  RESET,
  SET_PRIORITY( 1,  P(0),    IDLE, IDLE),
  SET_PRIORITY( 2,  P(1),    IDLE, IDLE),
  SET_PRIORITY( 3,  P(1),    IDLE, IDLE),
  SET_AFFINITY( 3,  A(1, 0), IDLE, IDLE),
  SET_PRIORITY( 4,  P(1),    IDLE, IDLE),
  SET_AFFINITY( 4,  A(1, 0), IDLE, IDLE),
  UNBLOCK(      0,              0, IDLE),
  UNBLOCK(      1,              0,    1),
  UNBLOCK(      2,              0,    1),
  UNBLOCK(      3,              0,    1),
  UNBLOCK(      4,              0,    1),
  BLOCK(        1,              0,    2),
  BLOCK(        2,              3,    0),
  BLOCK(        3,              4,    0),
  /*
   * Schedule a high priority affine thread directly with a low priority
   * affine thread in the corresponding ready queue.  In this case, we remove
   * the affine ready queue in _Scheduler_EDF_SMP_Allocate_processor().
   */
  RESET,
  UNBLOCK(      0,              0, IDLE),
  UNBLOCK(      1,              0,    1),
  SET_PRIORITY( 1,  P(2),       0,    1),
  SET_AFFINITY( 3,  A(0, 1),    0,    1),
  UNBLOCK(      3,              0,    1),
  SET_PRIORITY( 2,  P(1),       0,    1),
  SET_AFFINITY( 2,  A(0, 1),    0,    1),
  UNBLOCK(      2,              0,    2),
  BLOCK(        1,              0,    2),
  BLOCK(        2,              0,    3),
  /* Force migration of a higher priority one-to-all thread */
  RESET,
  UNBLOCK(      0,              0, IDLE),
  SET_AFFINITY( 1,  A(1, 0),    0, IDLE),
  UNBLOCK(      1,              1,    0),
  /*
   * Block a one-to-one thread while having a non-empty affine ready queue on
   * the same processor.
   */
  RESET,
  SET_AFFINITY( 1,  A(1, 0), IDLE, IDLE),
  SET_AFFINITY( 3,  A(1, 0), IDLE, IDLE),
  UNBLOCK(      0,              0, IDLE),
  UNBLOCK(      1,              1,    0),
  UNBLOCK(      2,              1,    0),
  UNBLOCK(      3,              1,    0),
  BLOCK(        1,              2,    0),
  BLOCK(        0,              3,    2),
  /*
   * Make sure that a one-to-one thread does not get the wrong processor
   * allocated after selecting the highest ready thread.
   */
  RESET,
  SET_AFFINITY( 1,  A(1, 0), IDLE, IDLE),
  SET_AFFINITY( 2,  A(1, 0), IDLE, IDLE),
  UNBLOCK(      0,              0, IDLE),
  UNBLOCK(      1,              1,    0),
  UNBLOCK(      2,              1,    0),
  BLOCK(        0,              1, IDLE),
  RESET
};
185
/* Singleton test state, shared between the master task and the timer */
static test_context test_instance;
187
188static void set_priority(rtems_id id, rtems_task_priority prio)
189{
190  rtems_status_code sc;
191
192  sc = rtems_task_set_priority(id, prio, &prio);
193  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
194}
195
196static void set_affinity(rtems_id id, uint32_t cpu_set_32)
197{
198  rtems_status_code sc;
199  cpu_set_t cpu_set;
200  size_t i;
201
202  CPU_ZERO(&cpu_set);
203
204  for (i = 0; i < CPU_COUNT; ++i) {
205    if ((cpu_set_32 & (UINT32_C(1) << i)) != 0) {
206      CPU_SET(i, &cpu_set);
207    }
208  }
209
210  sc = rtems_task_set_affinity(id, sizeof(cpu_set), &cpu_set);
211  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
212}
213
/*
 * Bring the scheduler back to a well-defined state: restore default
 * priorities and all-CPU affinity for every task, suspend the surplus
 * tasks, resume one task per processor, and finally suspend the current
 * heir of each processor (highest processor index first) so that the
 * idle threads take over in a deterministic order.
 */
static void reset(test_context *ctx)
{
  rtems_status_code sc;
  size_t i;

  /* Default priority P(i) and affinity to both processors for all tasks */
  for (i = 0; i < TASK_COUNT; ++i) {
    set_priority(ctx->task_ids[i], P(i));
    set_affinity(ctx->task_ids[i], A(1, 1));
  }

  /* Suspend the tasks beyond the processor count; may already be suspended */
  for (i = CPU_COUNT; i < TASK_COUNT; ++i) {
    sc = rtems_task_suspend(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_ALREADY_SUSPENDED);
  }

  /* Resume one task per processor; may already be running */
  for (i = 0; i < CPU_COUNT; ++i) {
    sc = rtems_task_resume(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_INCORRECT_STATE);
  }

  /* Order the idle threads explicitly */
  for (i = 0; i < CPU_COUNT; ++i) {
    const Per_CPU_Control *c;
    const Thread_Control *h;

    /* Walk the processors from the highest index downwards */
    c = _Per_CPU_Get_by_index(CPU_COUNT - 1 - i);
    h = c->heir;

    sc = rtems_task_suspend(h->Object.id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }
}
246
247static void check_cpu_allocations(test_context *ctx, const test_action *action)
248{
249  size_t i;
250
251  for (i = 0; i < CPU_COUNT; ++i) {
252    size_t e;
253    const Per_CPU_Control *c;
254    const Thread_Control *h;
255
256    e = action->expected_cpu_allocations[i];
257    c = _Per_CPU_Get_by_index(i);
258    h = c->heir;
259
260    if (e != IDLE) {
261      rtems_test_assert(h->Object.id == ctx->task_ids[e]);
262    } else {
263      rtems_test_assert(h->is_idle);
264    }
265  }
266}
267
268/*
269 * Use a timer to execute the actions, since it runs with thread dispatching
270 * disabled.  This is necessary to check the expected processor allocations.
271 */
272static void timer(rtems_id id, void *arg)
273{
274  test_context *ctx;
275  rtems_status_code sc;
276  size_t i;
277
278  ctx = arg;
279  i = ctx->action_index;
280
281  if (i == 0) {
282    sc = rtems_task_suspend(ctx->master_id);
283    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
284  }
285
286  if (i < RTEMS_ARRAY_SIZE(test_actions)) {
287    const test_action *action = &test_actions[i];
288    rtems_id task;
289
290    ctx->action_index = i + 1;
291
292    task = ctx->task_ids[action->index];
293
294    switch (action->kind) {
295      case KIND_SET_PRIORITY:
296        set_priority(task, action->data.priority);
297        break;
298      case KIND_SET_AFFINITY:
299        set_affinity(task, action->data.cpu_set);
300        break;
301      case KIND_BLOCK:
302        sc = rtems_task_suspend(task);
303        rtems_test_assert(sc == RTEMS_SUCCESSFUL);
304        break;
305      case KIND_UNBLOCK:
306        sc = rtems_task_resume(task);
307        rtems_test_assert(sc == RTEMS_SUCCESSFUL);
308        break;
309      default:
310        rtems_test_assert(action->kind == KIND_RESET);
311        reset(ctx);
312        break;
313    }
314
315    check_cpu_allocations(ctx, action);
316
317    sc = rtems_timer_reset(id);
318    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
319  } else {
320    sc = rtems_task_resume(ctx->master_id);
321    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
322
323    sc = rtems_event_transient_send(ctx->master_id);
324    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
325  }
326}
327
328static void do_nothing_task(rtems_task_argument arg)
329{
330  (void) arg;
331
332  while (true) {
333    /* Do nothing */
334  }
335}
336
337static void test(void)
338{
339  test_context *ctx;
340  rtems_status_code sc;
341  size_t i;
342
343  ctx = &test_instance;
344
345  ctx->master_id = rtems_task_self();
346
347  for (i = 0; i < TASK_COUNT; ++i) {
348    sc = rtems_task_create(
349      NAME,
350      P(i),
351      RTEMS_MINIMUM_STACK_SIZE,
352      RTEMS_DEFAULT_MODES,
353      RTEMS_DEFAULT_ATTRIBUTES,
354      &ctx->task_ids[i]
355    );
356    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
357
358    sc = rtems_task_start(ctx->task_ids[i], do_nothing_task, 0);
359    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
360  }
361
362  sc = rtems_timer_create(NAME, &ctx->timer_id);
363  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
364
365  sc = rtems_timer_fire_after(ctx->timer_id, 1, timer, ctx);
366  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
367
368  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
369  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
370
371  for (i = 0; i < TASK_COUNT; ++i) {
372    sc = rtems_task_delete(ctx->task_ids[i]);
373    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
374  }
375
376  sc = rtems_timer_delete(ctx->timer_id);
377  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
378}
379
380static void Init(rtems_task_argument arg)
381{
382  TEST_BEGIN();
383
384  if (rtems_get_processor_count() == CPU_COUNT) {
385    test();
386  } else {
387    puts("warning: wrong processor count to run the test");
388  }
389
390  TEST_END();
391  rtems_test_exit(0);
392}
393
/* One millisecond per clock tick */
#define CONFIGURE_MICROSECONDS_PER_TICK 1000

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER

/* The Init task plus the worker tasks */
#define CONFIGURE_MAXIMUM_TASKS (1 + TASK_COUNT)
#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

/* The scheduler under test */
#define CONFIGURE_SCHEDULER_EDF_SMP

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.