source: rtems/testsuites/smptests/smpscheduler03/init.c @ 72e0bdb

Last change on this file since 72e0bdb was 72e0bdb, checked in by Sebastian Huber <sebastian.huber@…>, on 10/10/16 at 12:50:19

score: Pass scheduler node to unblock operation

Changed for consistency with other scheduler operations.

Update #2556.

  • Property mode set to 100644
File size: 17.4 KB
Line 
1/*
2 * Copyright (c) 2014-2015 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#ifdef HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#include <stdio.h>
20#include <inttypes.h>
21
22#include <rtems.h>
23#include <rtems/libcsupport.h>
24#include <rtems/score/threadimpl.h>
25#include <rtems/score/schedulersmpimpl.h>
26
27#include "tmacros.h"
28
const char rtems_test_name[] = "SMPSCHEDULER 3";

/* Number of processors (and scheduler instances) used by the configuration. */
#define CPU_MAX 3

/* Builds the four-character scheduler name "   A", "   B", ... for index i. */
#define SCHED_NAME(i) rtems_build_name(' ', ' ', ' ', (char) ('A' + (i)))
34
/* State shared between the Init task and the per-processor test tasks. */
typedef struct {
  rtems_id barrier_id;          /* synchronizes Init() with the test tasks */
  rtems_id task_id[CPU_MAX];    /* test tasks created by Init(), index >= 1 */
  uint32_t cpu_index[CPU_MAX];  /* processor each test task reported running on */
} test_context;

static test_context test_instance;
42
/* Returns the SMP scheduler node of the thread's home scheduler node. */
static Scheduler_SMP_Node *get_scheduler_node(Thread_Control *thread)
{
  return _Scheduler_SMP_Node_downcast(_Thread_Scheduler_get_home_node(thread));
}
47
/*
 * Changes the real priority of @a thread to @a new_priority while holding
 * the thread wait lock.  Deferred scheduler updates are collected in
 * @a queue_context; the caller must act on them afterwards, either via
 * _Thread_Priority_update() or by invoking a scheduler operation directly.
 */
static void apply_priority(
  Thread_Control *thread,
  Priority_Control new_priority,
  bool prepend_it,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Context_clear_priority_updates(queue_context);
  _Thread_Wait_acquire(thread, queue_context);
  _Thread_Priority_change(
    thread,
    &thread->Real_priority,
    new_priority,
    prepend_it,
    queue_context
  );
  _Thread_Wait_release(thread, queue_context);
}
66
67static void change_priority(
68  Thread_Control *thread,
69  Priority_Control new_priority,
70  bool prepend_it
71)
72{
73  Thread_queue_Context queue_context;
74
75  apply_priority(thread, new_priority, prepend_it, &queue_context);
76  _Thread_Priority_update(&queue_context);
77}
78
79static void barrier_wait(test_context *ctx)
80{
81  rtems_status_code sc;
82
83  sc = rtems_barrier_wait(ctx->barrier_id, RTEMS_NO_TIMEOUT);
84  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
85}
86
/*
 * Body of the helper tasks created by start_task().  It must never execute;
 * the assertion fails the test if it does.
 */
static void task(rtems_task_argument arg)
{
  rtems_test_assert(0);
}
91
92static rtems_id start_task(rtems_task_priority prio)
93{
94  rtems_status_code sc;
95  rtems_id task_id;
96
97  sc = rtems_task_create(
98    rtems_build_name('T', 'A', 'S', 'K'),
99    prio,
100    RTEMS_MINIMUM_STACK_SIZE,
101    RTEMS_DEFAULT_MODES,
102    RTEMS_DEFAULT_ATTRIBUTES,
103    &task_id
104  );
105  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
106
107  sc = rtems_task_start(task_id, task, 0);
108  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
109
110  return task_id;
111}
112
/*
 * Returns the thread control block for @a task_id.  _Thread_Get() returns
 * with ISRs disabled via the lock context, so re-enable them before
 * handing out the pointer.
 */
static Thread_Control *get_thread_by_id(rtems_id task_id)
{
  ISR_lock_Context lock_context;
  Thread_Control *thread;

  thread = _Thread_Get(task_id, &lock_context);
  rtems_test_assert(thread != NULL);
  _ISR_lock_ISR_enable(&lock_context);

  return thread;
}
124
/*
 * Verifies that changing the priority of @a executing to @a prio moves its
 * scheduler node from @a start_state to @a new_state.  Runs with thread
 * dispatching disabled so the node state can be inspected safely.
 */
static void test_case_change_priority(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Scheduler_SMP_Node_state start_state,
  Priority_Control prio,
  bool prepend_it,
  Scheduler_SMP_Node_state new_state
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Establish the requested start state via a known priority (asserted). */
  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 1, true);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, true);
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  /* The actual operation under test. */
  change_priority(executing, prio, prepend_it);
  rtems_test_assert(executing_node->state == new_state);

  /* Restore a scheduled state so the next test case starts clean. */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
159
/* Scheduler node states used as test case start and expected new states. */
static const Scheduler_SMP_Node_state states[2] = {
  SCHEDULER_SMP_NODE_SCHEDULED,
  SCHEDULER_SMP_NODE_READY
};

/*
 * New priorities applied by the test cases; priorities[i] is expected to
 * lead to node state states[i] given a competing task at priority 3.
 */
static const Priority_Control priorities[2] = { 2, 5 };

/* Whether to prepend or append the thread at its new priority. */
static const bool prepend_it[2] = { true, false };
168
/*
 * Drives test_case_change_priority() over all combinations of start state,
 * new priority, and prepend/append.  The expected new state is states[j],
 * deliberately coupled to priorities[j]: with the helper task at priority 3,
 * priority 2 keeps the node scheduled and priority 5 makes it ready.
 */
static void test_change_priority(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  size_t i;
  size_t j;
  size_t k;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) {
      for (k = 0; k < RTEMS_ARRAY_SIZE(prepend_it); ++k) {
        test_case_change_priority(
          executing,
          executing_node,
          states[i],
          priorities[j],
          prepend_it[k],
          states[j]
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
201
/*
 * Applies @a new_priority to @a thread and then invokes the scheduler
 * update_priority operation directly, holding the thread state lock and,
 * nested within it, the scheduler lock.  Returns the thread that needs
 * help, if any.
 *
 * NOTE(review): the deferred updates collected in queue_context are
 * intentionally not flushed via _Thread_Priority_update() — the scheduler
 * operation is driven by hand instead.
 */
static Thread_Control *update_priority_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node,
  Priority_Control new_priority,
  bool prepend_it
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_Control *needs_help;
  Thread_queue_Context queue_context;

  apply_priority(thread, new_priority, prepend_it, &queue_context);

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  /* The scheduler node is passed explicitly to the operation under test. */
  needs_help = (*scheduler->Operations.update_priority)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );

  return needs_help;
}
232
/*
 * Verifies the scheduler update_priority operation: puts @a executing into
 * @a start_state, invokes the operation with @a prio, and checks both the
 * resulting node state and the returned needs-help thread.
 */
static void test_case_update_priority_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state start_state,
  Priority_Control prio,
  bool prepend_it,
  Scheduler_SMP_Node_state new_state
)
{
  Thread_Control *needs_help;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Establish the requested start state via a known priority (asserted). */
  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 1, true);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, true);
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  /* The actual operation under test. */
  needs_help = update_priority_op(executing, executing_node, prio, prepend_it);
  rtems_test_assert(executing_node->state == new_state);

  /*
   * On a state transition, scheduled->ready yields the executing thread as
   * needing help, ready->scheduled yields the other (displaced) thread;
   * without a transition no thread needs help.
   */
  if (start_state != new_state) {
    switch (start_state) {
      case SCHEDULER_SMP_NODE_SCHEDULED:
        rtems_test_assert(needs_help == executing);
        break;
      case SCHEDULER_SMP_NODE_READY:
        rtems_test_assert(needs_help == other);
        break;
      default:
        rtems_test_assert(0);
        break;
    }
  } else {
    rtems_test_assert(needs_help == NULL);
  }

  /* Restore a scheduled state so the next test case starts clean. */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
285
286static void test_update_priority_op(void)
287{
288  rtems_status_code sc;
289  rtems_id task_id;
290  Thread_Control *executing;
291  Scheduler_SMP_Node *executing_node;
292  Thread_Control *other;
293  size_t i;
294  size_t j;
295  size_t k;
296
297  task_id = start_task(3);
298  executing = _Thread_Get_executing();
299  executing_node = get_scheduler_node(executing);
300
301  other = get_thread_by_id(task_id);
302
303  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
304    for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) {
305      for (k = 0; k < RTEMS_ARRAY_SIZE(prepend_it); ++k) {
306        test_case_update_priority_op(
307          executing,
308          executing_node,
309          other,
310          states[i],
311          priorities[j],
312          prepend_it[k],
313          states[j]
314        );
315      }
316    }
317  }
318
319  sc = rtems_task_delete(task_id);
320  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
321}
322
/*
 * Invokes the scheduler yield operation directly for @a thread, holding
 * the thread state lock and, nested within it, the scheduler lock.
 * Returns the thread that needs help, if any.
 */
static Thread_Control *yield_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_Control *needs_help;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  /* The scheduler node is passed explicitly to the operation under test. */
  needs_help = (*scheduler->Operations.yield)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );

  return needs_help;
}
348
/*
 * Verifies the scheduler yield operation: arranges the priorities of
 * @a executing and @a other so that @a executing starts in @a start_state,
 * yields, and checks the resulting node state and needs-help thread.
 */
static void test_case_yield_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state start_state,
  Scheduler_SMP_Node_state new_state
)
{
  Thread_Control *needs_help;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Park both threads at the same priority before the specific setup. */
  change_priority(executing, 4, false);
  change_priority(other, 4, false);

  /*
   * Choose priorities so the yield transitions start_state -> new_state.
   * A ready thread cannot become scheduled by yielding, hence that
   * combination is rejected.
   */
  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      switch (new_state) {
        case SCHEDULER_SMP_NODE_SCHEDULED:
          change_priority(executing, 2, false);
          change_priority(other, 3, false);
          break;
        case SCHEDULER_SMP_NODE_READY:
          change_priority(executing, 2, false);
          change_priority(other, 2, false);
          break;
        default:
          rtems_test_assert(0);
          break;
      }
      break;
    case SCHEDULER_SMP_NODE_READY:
      switch (new_state) {
        case SCHEDULER_SMP_NODE_SCHEDULED:
          rtems_test_assert(0);
          break;
        case SCHEDULER_SMP_NODE_READY:
          change_priority(executing, 3, false);
          change_priority(other, 2, false);
          break;
        default:
          rtems_test_assert(0);
          break;
      }
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  /* The actual operation under test. */
  needs_help = yield_op(executing, executing_node);
  rtems_test_assert(executing_node->state == new_state);

  /*
   * On a state transition the yielding thread needs help if it lost its
   * processor; otherwise the other thread does; no transition, no help.
   */
  if (start_state != new_state) {
    switch (start_state) {
      case SCHEDULER_SMP_NODE_SCHEDULED:
        rtems_test_assert(needs_help == executing);
        break;
      case SCHEDULER_SMP_NODE_READY:
        rtems_test_assert(needs_help == other);
        break;
      default:
        rtems_test_assert(0);
        break;
    }
  } else {
    rtems_test_assert(needs_help == NULL);
  }

  /* Restore a scheduled state so the next test case starts clean. */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
425
426static void test_yield_op(void)
427{
428  rtems_status_code sc;
429  rtems_id task_id;
430  Thread_Control *executing;
431  Scheduler_SMP_Node *executing_node;
432  Thread_Control *other;
433  size_t i;
434  size_t j;
435
436  task_id = start_task(2);
437  executing = _Thread_Get_executing();
438  executing_node = get_scheduler_node(executing);
439
440  other = get_thread_by_id(task_id);
441
442  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
443    for (j = 0; j < RTEMS_ARRAY_SIZE(states); ++j) {
444      if (
445        states[i] != SCHEDULER_SMP_NODE_READY
446          || states[j] != SCHEDULER_SMP_NODE_SCHEDULED
447      ) {
448        test_case_yield_op(
449          executing,
450          executing_node,
451          other,
452          states[i],
453          states[j]
454        );
455      }
456    }
457  }
458
459  sc = rtems_task_delete(task_id);
460  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
461}
462
/*
 * Invokes the scheduler block operation directly for @a thread, holding
 * the thread state lock and, nested within it, the scheduler lock.
 */
static void block_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  (*scheduler->Operations.block)(scheduler, thread, &scheduler_node->Base);

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );
}
481
/*
 * Invokes the scheduler unblock operation directly for @a thread, holding
 * the thread state lock and, nested within it, the scheduler lock.  The
 * scheduler node is passed explicitly to the operation.  Returns the
 * thread that needs help, if any.
 */
static Thread_Control *unblock_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_Control *needs_help;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  needs_help = (*scheduler->Operations.unblock)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );

  return needs_help;
}
507
/*
 * Verifies the scheduler block/unblock operations: sets the priority of
 * @a executing so its node would end in @a new_state, blocks it, unblocks
 * it, and checks the resulting node state and the needs-help thread.
 */
static void test_case_unblock_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state new_state
)
{
  Thread_Control *needs_help;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Choose a priority yielding the expected post-unblock state (asserted). */
  switch (new_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 2, false);
      rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, false);
      rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_READY);
      break;
    default:
      rtems_test_assert(0);
      break;
  }

  block_op(executing, executing_node);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_BLOCKED);

  /* The actual operation under test. */
  needs_help = unblock_op(executing, executing_node);
  rtems_test_assert(executing_node->state == new_state);

  /*
   * Unblocking to scheduled displaces the other thread; unblocking to
   * ready leaves the executing thread itself needing help.
   */
  switch (new_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      rtems_test_assert(needs_help == other);
      break;
    case SCHEDULER_SMP_NODE_READY:
      rtems_test_assert(needs_help == executing);
      break;
    default:
      rtems_test_assert(0);
      break;
  }

  /* Restore a scheduled state so the next test case starts clean. */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
557
558static void test_unblock_op(void)
559{
560  rtems_status_code sc;
561  rtems_id task_id;
562  Thread_Control *executing;
563  Scheduler_SMP_Node *executing_node;
564  Thread_Control *other;
565  size_t i;
566
567  task_id = start_task(3);
568  executing = _Thread_Get_executing();
569  executing_node = get_scheduler_node(executing);
570
571  other = get_thread_by_id(task_id);
572
573  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
574    test_case_unblock_op(
575      executing,
576      executing_node,
577      other,
578      states[i]
579    );
580  }
581
582  sc = rtems_task_delete(task_id);
583  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
584}
585
/* Runs the full test sequence; called by Init() and by each test task. */
static void tests(void)
{
  test_change_priority();
  test_update_priority_op();
  test_yield_op();
  test_unblock_op();
}
593
/*
 * Body of the per-processor test tasks: runs the test sequence, records the
 * processor it executed on, meets the Init task at the barrier, and then
 * suspends itself forever.
 */
static void test_task(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;

  tests();

  /* arg is the expected processor index; Init() checks it afterwards. */
  ctx->cpu_index[arg] = rtems_get_current_processor();

  barrier_wait(ctx);

  rtems_task_suspend(RTEMS_SELF);
  rtems_test_assert(0);  /* must never resume */
}
607
/* Reports completion of the test sequence on the given processor. */
static void done(uint32_t cpu_index)
{
  printf("test done on processor %" PRIu32 "\n", cpu_index);
}
612
/*
 * Initialization task: runs the tests itself, starts one test task per
 * further processor (each bound to that processor's scheduler), waits for
 * all of them at the barrier, checks their reported processors, and
 * verifies that no resources leaked.
 */
static void Init(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  rtems_resource_snapshot snapshot;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu_index;

  TEST_BEGIN();

  rtems_resource_snapshot_take(&snapshot);

  /* All participants (Init plus one task per further processor) meet here. */
  sc = rtems_barrier_create(
    rtems_build_name('B', 'A', 'R', 'I'),
    RTEMS_BARRIER_AUTOMATIC_RELEASE,
    cpu_count,
    &ctx->barrier_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    rtems_id scheduler_id;

    sc = rtems_task_create(
      rtems_build_name('T', 'A', 'S', 'K'),
      255,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->task_id[cpu_index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* Bind the task to the scheduler named for this processor. */
    sc = rtems_scheduler_ident(SCHED_NAME(cpu_index), &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(ctx->task_id[cpu_index], scheduler_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(ctx->task_id[cpu_index], test_task, cpu_index);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* Run the test sequence on processor 0 as well. */
  tests();

  barrier_wait(ctx);

  sc = rtems_barrier_delete(ctx->barrier_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  done(0);

  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    sc = rtems_task_delete(ctx->task_id[cpu_index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* Each test task must have reported its designated processor. */
    rtems_test_assert(ctx->cpu_index[cpu_index] == cpu_index);

    done(cpu_index);
  }

  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));

  TEST_END();
  rtems_test_exit(0);
}
679
/* Test application configuration. */
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER

#define CONFIGURE_SMP_APPLICATION

#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_MAX

#define CONFIGURE_MAXIMUM_PRIORITY 255

/* Use three different SMP scheduler implementations, one per processor. */
#define CONFIGURE_SCHEDULER_PRIORITY_SMP
#define CONFIGURE_SCHEDULER_SIMPLE_SMP
#define CONFIGURE_SCHEDULER_PRIORITY_AFFINITY_SMP

#include <rtems/scheduler.h>

RTEMS_SCHEDULER_CONTEXT_PRIORITY_SMP(a, CONFIGURE_MAXIMUM_PRIORITY + 1);

RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(b);

RTEMS_SCHEDULER_CONTEXT_PRIORITY_AFFINITY_SMP(
  c,
  CONFIGURE_MAXIMUM_PRIORITY + 1
);

/* Scheduler names "   A", "   B", "   C" built via SCHED_NAME(). */
#define CONFIGURE_SCHEDULER_CONTROLS \
  RTEMS_SCHEDULER_CONTROL_PRIORITY_SMP(a, SCHED_NAME(0)), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(b, SCHED_NAME(1)), \
  RTEMS_SCHEDULER_CONTROL_PRIORITY_AFFINITY_SMP(c, SCHED_NAME(2))

/* Processor 0 is mandatory; the other processors are used if present. */
#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
  RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
  RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)

#define CONFIGURE_MAXIMUM_TASKS 6
#define CONFIGURE_MAXIMUM_BARRIERS 1

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.