source: rtems/testsuites/smptests/smpscheduler03/init.c @ 2df4abc

Last change on this file was 2df4abc, checked in by Sebastian Huber <sebastian.huber@…> on 10/10/16 at 12:01:55

score: Pass scheduler node to yield operation

Changed for consistency with other scheduler operations.

Update #2556.
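For reference, the yield_op() helper in this file now invokes the operation with the thread's scheduler node as an explicit third argument, matching the other operation wrappers below:

  needs_help = (*scheduler->Operations.yield)(
    scheduler,
    thread,
    &scheduler_node->Base
  );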

/*
 * Copyright (c) 2014-2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
  #include "config.h"
#endif

#include <stdio.h>
#include <inttypes.h>

#include <rtems.h>
#include <rtems/libcsupport.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/schedulersmpimpl.h>

#include "tmacros.h"

const char rtems_test_name[] = "SMPSCHEDULER 3";

#define CPU_MAX 3

#define SCHED_NAME(i) rtems_build_name(' ', ' ', ' ', (char) ('A' + (i)))

typedef struct {
  rtems_id barrier_id;
  rtems_id task_id[CPU_MAX];
  uint32_t cpu_index[CPU_MAX];
} test_context;

static test_context test_instance;

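/*
 * Returns the executing thread's home scheduler node, downcast to the SMP
 * node type, so the test cases can inspect its state (scheduled, ready,
 * blocked) directly.
 */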
static Scheduler_SMP_Node *get_scheduler_node(Thread_Control *thread)
{
  return _Scheduler_SMP_Node_downcast(_Thread_Scheduler_get_home_node(thread));
}

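/*
 * The two helpers below change a thread's real priority through the thread
 * wait and priority machinery; change_priority() additionally performs the
 * resulting scheduler update.  The test cases use them to move a scheduler
 * node between the scheduled and ready states.
 */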
static void apply_priority(
  Thread_Control *thread,
  Priority_Control new_priority,
  bool prepend_it,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Context_clear_priority_updates(queue_context);
  _Thread_Wait_acquire(thread, queue_context);
  _Thread_Priority_change(
    thread,
    &thread->Real_priority,
    new_priority,
    prepend_it,
    queue_context
  );
  _Thread_Wait_release(thread, queue_context);
}

static void change_priority(
  Thread_Control *thread,
  Priority_Control new_priority,
  bool prepend_it
)
{
  Thread_queue_Context queue_context;

  apply_priority(thread, new_priority, prepend_it, &queue_context);
  _Thread_Priority_update(&queue_context);
}

static void barrier_wait(test_context *ctx)
{
  rtems_status_code sc;

  sc = rtems_barrier_wait(ctx->barrier_id, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

static void task(rtems_task_argument arg)
{
  rtems_test_assert(0);
}

static rtems_id start_task(rtems_task_priority prio)
{
  rtems_status_code sc;
  rtems_id task_id;

  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    prio,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(task_id, task, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  return task_id;
}

static Thread_Control *get_thread_by_id(rtems_id task_id)
{
  ISR_lock_Context lock_context;
  Thread_Control *thread;

  thread = _Thread_Get(task_id, &lock_context);
  rtems_test_assert(thread != NULL);
  _ISR_lock_ISR_enable(&lock_context);

  return thread;
}

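/*
 * One change-priority test case: with thread dispatching disabled, put the
 * executing thread's node into the requested start state (priority 1 keeps
 * it scheduled, priority 4 makes it ready behind the priority 3 helper
 * task), apply the priority change under test, check that the node ends up
 * in the expected state, and finally restore priority 1.
 */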
static void test_case_change_priority(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Scheduler_SMP_Node_state start_state,
  Priority_Control prio,
  bool prepend_it,
  Scheduler_SMP_Node_state new_state
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 1, true);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, true);
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  change_priority(executing, prio, prepend_it);
  rtems_test_assert(executing_node->state == new_state);

  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}

static const Scheduler_SMP_Node_state states[2] = {
  SCHEDULER_SMP_NODE_SCHEDULED,
  SCHEDULER_SMP_NODE_READY
};

static const Priority_Control priorities[2] = { 2, 5 };

static const bool prepend_it[2] = { true, false };

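/*
 * The helper task runs at priority 3, so priority 2 leaves the executing
 * thread's node scheduled while priority 5 makes it ready.  The priorities[]
 * and states[] arrays are therefore index-aligned: in the loops below the
 * expected end state for priorities[j] is states[j].
 */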
static void test_change_priority(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  size_t i;
  size_t j;
  size_t k;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) {
      for (k = 0; k < RTEMS_ARRAY_SIZE(prepend_it); ++k) {
        test_case_change_priority(
          executing,
          executing_node,
          states[i],
          priorities[j],
          prepend_it[k],
          states[j]
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

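/*
 * Invokes the scheduler's update_priority operation directly, holding the
 * thread state lock and the scheduler lock as the scheduler framework would,
 * and returns the thread that needs help (if any).
 */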
static Thread_Control *update_priority_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node,
  Priority_Control new_priority,
  bool prepend_it
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_Control *needs_help;
  Thread_queue_Context queue_context;

  apply_priority(thread, new_priority, prepend_it, &queue_context);

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  needs_help = (*scheduler->Operations.update_priority)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );

  return needs_help;
}

static void test_case_update_priority_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state start_state,
  Priority_Control prio,
  bool prepend_it,
  Scheduler_SMP_Node_state new_state
)
{
  Thread_Control *needs_help;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 1, true);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, true);
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  needs_help = update_priority_op(executing, executing_node, prio, prepend_it);
  rtems_test_assert(executing_node->state == new_state);

  if (start_state != new_state) {
    switch (start_state) {
      case SCHEDULER_SMP_NODE_SCHEDULED:
        rtems_test_assert(needs_help == executing);
        break;
      case SCHEDULER_SMP_NODE_READY:
        rtems_test_assert(needs_help == other);
        break;
      default:
        rtems_test_assert(0);
        break;
    }
  } else {
    rtems_test_assert(needs_help == NULL);
  }

  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}

static void test_update_priority_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;
  size_t j;
  size_t k;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) {
      for (k = 0; k < RTEMS_ARRAY_SIZE(prepend_it); ++k) {
        test_case_update_priority_op(
          executing,
          executing_node,
          other,
          states[i],
          priorities[j],
          prepend_it[k],
          states[j]
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

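/*
 * Invokes the yield scheduler operation directly, passing the thread's
 * scheduler node as the third argument, which is the interface change made
 * by this revision.
 */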
static Thread_Control *yield_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_Control *needs_help;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  needs_help = (*scheduler->Operations.yield)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );

  return needs_help;
}

static void test_case_yield_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state start_state,
  Scheduler_SMP_Node_state new_state
)
{
  Thread_Control *needs_help;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  change_priority(executing, 4, false);
  change_priority(other, 4, false);

  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      switch (new_state) {
        case SCHEDULER_SMP_NODE_SCHEDULED:
          change_priority(executing, 2, false);
          change_priority(other, 3, false);
          break;
        case SCHEDULER_SMP_NODE_READY:
          change_priority(executing, 2, false);
          change_priority(other, 2, false);
          break;
        default:
          rtems_test_assert(0);
          break;
      }
      break;
    case SCHEDULER_SMP_NODE_READY:
      switch (new_state) {
        case SCHEDULER_SMP_NODE_SCHEDULED:
          rtems_test_assert(0);
          break;
        case SCHEDULER_SMP_NODE_READY:
          change_priority(executing, 3, false);
          change_priority(other, 2, false);
          break;
        default:
          rtems_test_assert(0);
          break;
      }
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  needs_help = yield_op(executing, executing_node);
  rtems_test_assert(executing_node->state == new_state);

  if (start_state != new_state) {
    switch (start_state) {
      case SCHEDULER_SMP_NODE_SCHEDULED:
        rtems_test_assert(needs_help == executing);
        break;
      case SCHEDULER_SMP_NODE_READY:
        rtems_test_assert(needs_help == other);
        break;
      default:
        rtems_test_assert(0);
        break;
    }
  } else {
    rtems_test_assert(needs_help == NULL);
  }

  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}

static void test_yield_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;
  size_t j;

  task_id = start_task(2);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(states); ++j) {
      if (
        states[i] != SCHEDULER_SMP_NODE_READY
          || states[j] != SCHEDULER_SMP_NODE_SCHEDULED
      ) {
        test_case_yield_op(
          executing,
          executing_node,
          other,
          states[i],
          states[j]
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

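/*
 * Direct invocations of the block and unblock scheduler operations under the
 * thread state and scheduler locks; unblock returns the thread that needs
 * help (if any).
 */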
static void block_op(Thread_Control *thread)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  (*scheduler->Operations.block)(scheduler, thread);

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );
}

static Thread_Control *unblock_op(Thread_Control *thread)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_Control *needs_help;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  needs_help = (*scheduler->Operations.unblock)(scheduler, thread);

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );

  return needs_help;
}

static void test_case_unblock_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state new_state
)
{
  Thread_Control *needs_help;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  switch (new_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 2, false);
      rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, false);
      rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_READY);
      break;
    default:
      rtems_test_assert(0);
      break;
  }

  block_op(executing);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_BLOCKED);

  needs_help = unblock_op(executing);
  rtems_test_assert(executing_node->state == new_state);

  switch (new_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      rtems_test_assert(needs_help == other);
      break;
    case SCHEDULER_SMP_NODE_READY:
      rtems_test_assert(needs_help == executing);
      break;
    default:
      rtems_test_assert(0);
      break;
  }

  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}

static void test_unblock_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    test_case_unblock_op(
      executing,
      executing_node,
      other,
      states[i]
    );
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

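/*
 * Runs the complete set of operation tests.  The Init task executes it on
 * the boot processor, and one test task per additional processor executes it
 * on its own scheduler instance (see Init() below).
 */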
static void tests(void)
{
  test_change_priority();
  test_update_priority_op();
  test_yield_op();
  test_unblock_op();
}

static void test_task(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;

  tests();

  ctx->cpu_index[arg] = rtems_get_current_processor();

  barrier_wait(ctx);

  rtems_task_suspend(RTEMS_SELF);
  rtems_test_assert(0);
}

static void done(uint32_t cpu_index)
{
  printf("test done on processor %" PRIu32 "\n", cpu_index);
}

static void Init(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  rtems_resource_snapshot snapshot;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu_index;

  TEST_BEGIN();

  rtems_resource_snapshot_take(&snapshot);

  sc = rtems_barrier_create(
    rtems_build_name('B', 'A', 'R', 'I'),
    RTEMS_BARRIER_AUTOMATIC_RELEASE,
    cpu_count,
    &ctx->barrier_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    rtems_id scheduler_id;

    sc = rtems_task_create(
      rtems_build_name('T', 'A', 'S', 'K'),
      255,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->task_id[cpu_index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_scheduler_ident(SCHED_NAME(cpu_index), &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(ctx->task_id[cpu_index], scheduler_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(ctx->task_id[cpu_index], test_task, cpu_index);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  tests();

  barrier_wait(ctx);

  sc = rtems_barrier_delete(ctx->barrier_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  done(0);

  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    sc = rtems_task_delete(ctx->task_id[cpu_index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    rtems_test_assert(ctx->cpu_index[cpu_index] == cpu_index);

    done(cpu_index);
  }

  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));

  TEST_END();
  rtems_test_exit(0);
}

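/*
 * Application configuration: up to three processors, each served by a
 * different SMP scheduler implementation (deterministic priority SMP, simple
 * SMP, and priority affinity SMP), so the operation tests above run against
 * each implementation.
 */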
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER

#define CONFIGURE_SMP_APPLICATION

#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_MAX

#define CONFIGURE_MAXIMUM_PRIORITY 255

#define CONFIGURE_SCHEDULER_PRIORITY_SMP
#define CONFIGURE_SCHEDULER_SIMPLE_SMP
#define CONFIGURE_SCHEDULER_PRIORITY_AFFINITY_SMP

#include <rtems/scheduler.h>

RTEMS_SCHEDULER_CONTEXT_PRIORITY_SMP(a, CONFIGURE_MAXIMUM_PRIORITY + 1);

RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(b);

RTEMS_SCHEDULER_CONTEXT_PRIORITY_AFFINITY_SMP(
  c,
  CONFIGURE_MAXIMUM_PRIORITY + 1
);

#define CONFIGURE_SCHEDULER_CONTROLS \
  RTEMS_SCHEDULER_CONTROL_PRIORITY_SMP(a, SCHED_NAME(0)), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(b, SCHED_NAME(1)), \
  RTEMS_SCHEDULER_CONTROL_PRIORITY_AFFINITY_SMP(c, SCHED_NAME(2))

#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
  RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
  RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)

#define CONFIGURE_MAXIMUM_TASKS 6
#define CONFIGURE_MAXIMUM_BARRIERS 1

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>