source: rtems/testsuites/smptests/smpscheduler03/init.c @ 9c238e1

Last change on this file since 9c238e1 was 9c238e1, checked in by Sebastian Huber <sebastian.huber@…>, on Oct 21, 2016 at 12:33:01 PM

score: Simplify update priority scheduler op

Remove unused return status.

  • Property mode set to 100644
File size: 17.3 KB
Line 
1/*
2 * Copyright (c) 2014-2015 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#ifdef HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#include <stdio.h>
20#include <inttypes.h>
21
22#include <rtems.h>
23#include <rtems/libcsupport.h>
24#include <rtems/score/threadimpl.h>
25#include <rtems/score/schedulersmpimpl.h>
26
27#include "tmacros.h"
28
const char rtems_test_name[] = "SMPSCHEDULER 3";

/* Maximum processor count configured for this test (see confdefs below). */
#define CPU_MAX 3

/* Build a scheduler name "   A", "   B", ... for scheduler index i. */
#define SCHED_NAME(i) rtems_build_name(' ', ' ', ' ', (char) ('A' + (i)))

typedef struct {
  rtems_id barrier_id;            /* barrier synchronizing all test tasks */
  rtems_id task_id[CPU_MAX];      /* one test task per secondary processor */
  uint32_t cpu_index[CPU_MAX];    /* processor each test task reported from */
} test_context;

static test_context test_instance;
42
43static Scheduler_SMP_Node *get_scheduler_node(Thread_Control *thread)
44{
45  return _Scheduler_SMP_Node_downcast(_Thread_Scheduler_get_home_node(thread));
46}
47
/*
 * Change the real priority of the thread to new_priority while holding the
 * thread wait lock.  The resulting scheduler priority updates are only
 * recorded in queue_context; the caller is responsible for carrying them out
 * afterwards (e.g. via _Thread_Priority_update()).
 */
static void apply_priority(
  Thread_Control *thread,
  Priority_Control new_priority,
  bool prepend_it,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Context_clear_priority_updates(queue_context);
  _Thread_Wait_acquire(thread, queue_context);
  _Thread_Priority_change(
    thread,
    &thread->Real_priority,
    new_priority,
    prepend_it,
    queue_context
  );
  _Thread_Wait_release(thread, queue_context);
}
66
67static void change_priority(
68  Thread_Control *thread,
69  Priority_Control new_priority,
70  bool prepend_it
71)
72{
73  Thread_queue_Context queue_context;
74
75  apply_priority(thread, new_priority, prepend_it, &queue_context);
76  _Thread_Priority_update(&queue_context);
77}
78
79static void barrier_wait(test_context *ctx)
80{
81  rtems_status_code sc;
82
83  sc = rtems_barrier_wait(ctx->barrier_id, RTEMS_NO_TIMEOUT);
84  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
85}
86
87static void task(rtems_task_argument arg)
88{
89  rtems_test_assert(0);
90}
91
92static rtems_id start_task(rtems_task_priority prio)
93{
94  rtems_status_code sc;
95  rtems_id task_id;
96
97  sc = rtems_task_create(
98    rtems_build_name('T', 'A', 'S', 'K'),
99    prio,
100    RTEMS_MINIMUM_STACK_SIZE,
101    RTEMS_DEFAULT_MODES,
102    RTEMS_DEFAULT_ATTRIBUTES,
103    &task_id
104  );
105  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
106
107  sc = rtems_task_start(task_id, task, 0);
108  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
109
110  return task_id;
111}
112
113static Thread_Control *get_thread_by_id(rtems_id task_id)
114{
115  ISR_lock_Context lock_context;
116  Thread_Control *thread;
117
118  thread = _Thread_Get(task_id, &lock_context);
119  rtems_test_assert(thread != NULL);
120  _ISR_lock_ISR_enable(&lock_context);
121
122  return thread;
123}
124
/*
 * One change_priority() test case: put the executing thread into
 * start_state, change its priority to prio, and check that its scheduler
 * node reaches new_state.  Runs with thread dispatching disabled so the
 * executing thread keeps running even while its node is only ready.
 */
static void test_case_change_priority(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Scheduler_SMP_Node_state start_state,
  Priority_Control prio,
  bool prepend_it,
  Scheduler_SMP_Node_state new_state
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Priorities 1 and 4 bracket the helper task's priority 3. */
  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 1, true);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, true);
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  change_priority(executing, prio, prepend_it);
  rtems_test_assert(executing_node->state == new_state);

  /* Restore the highest priority so the executing thread is scheduled. */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
159
/* Scheduler node states used to parametrize the test cases below. */
static const Scheduler_SMP_Node_state states[2] = {
  SCHEDULER_SMP_NODE_SCHEDULED,
  SCHEDULER_SMP_NODE_READY
};

/*
 * Test priorities relative to the helper task: index 0 is a high priority
 * and index 1 a low priority, matching states[] by index.
 */
static const Priority_Control priorities[2] = { 2, 5 };

/* Whether the priority change prepends or appends within its priority. */
static const bool prepend_it[2] = { true, false };
168
/*
 * Exercise change_priority() for all combinations of start state, new
 * priority, and prepend/append.  The helper task runs at priority 3, so
 * priorities[j] determines the expected new state states[j]: priority 2
 * keeps the executing thread scheduled, priority 5 makes it only ready.
 */
static void test_change_priority(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  size_t i;
  size_t j;
  size_t k;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) {
      for (k = 0; k < RTEMS_ARRAY_SIZE(prepend_it); ++k) {
        test_case_change_priority(
          executing,
          executing_node,
          states[i],
          priorities[j],
          prepend_it[k],
          states[j]  /* expected state pairs with priorities[j] by index */
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
201
/*
 * Change the thread's priority and then invoke the scheduler's
 * update_priority operation directly, holding the thread state lock and the
 * scheduler lock as the operation requires.  The priority updates recorded
 * by apply_priority() are deliberately not carried out via
 * _Thread_Priority_update(); this test drives the scheduler operation
 * itself.
 */
static void update_priority_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node,
  Priority_Control new_priority,
  bool prepend_it
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_queue_Context queue_context;

  apply_priority(thread, new_priority, prepend_it, &queue_context);

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  (*scheduler->Operations.update_priority)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );
}
229
/*
 * One update_priority scheduler operation test case: put the executing
 * thread into start_state, invoke the operation with the new priority, and
 * check the resulting node state.  When the state changed, also verify the
 * heir on this processor: losing the processor makes 'other' the heir,
 * gaining it back makes 'executing' the heir.
 */
static void test_case_update_priority_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state start_state,
  Priority_Control prio,
  bool prepend_it,
  Scheduler_SMP_Node_state new_state
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Priorities 1 and 4 bracket the helper task's priority 3. */
  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 1, true);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, true);
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  update_priority_op(executing, executing_node, prio, prepend_it);
  rtems_test_assert(executing_node->state == new_state);

  if (start_state != new_state) {
    switch (start_state) {
      case SCHEDULER_SMP_NODE_SCHEDULED:
        /* The executing thread lost the processor to the other task. */
        rtems_test_assert(cpu_self->heir == other);
        break;
      case SCHEDULER_SMP_NODE_READY:
        /* The executing thread took the processor back. */
        rtems_test_assert(cpu_self->heir == executing);
        break;
      default:
        rtems_test_assert(0);
        break;
    }
  }

  /* Restore the highest priority so the executing thread is scheduled. */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
279
/*
 * Exercise the scheduler update_priority operation for all combinations of
 * start state, new priority, and prepend/append.  As in
 * test_change_priority(), the expected new state states[j] pairs with
 * priorities[j] by index against the helper task at priority 3.
 */
static void test_update_priority_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;
  size_t j;
  size_t k;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) {
      for (k = 0; k < RTEMS_ARRAY_SIZE(prepend_it); ++k) {
        test_case_update_priority_op(
          executing,
          executing_node,
          other,
          states[i],
          priorities[j],
          prepend_it[k],
          states[j]  /* expected state pairs with priorities[j] by index */
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
316
/*
 * Invoke the scheduler's yield operation directly for the thread, holding
 * the thread state lock and the scheduler lock.  Returns the thread that
 * needs help according to the operation, or NULL.
 */
static Thread_Control *yield_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_Control *needs_help;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  needs_help = (*scheduler->Operations.yield)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );

  return needs_help;
}
342
/*
 * One yield operation test case: arrange the priorities of the executing
 * thread and the other task so that the executing thread's node starts in
 * start_state and the yield transfers it to new_state, then verify the
 * needs-help result.  A ready-to-scheduled transition via yield is
 * impossible and is excluded by the caller.  Runs with thread dispatching
 * disabled.
 */
static void test_case_yield_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state start_state,
  Scheduler_SMP_Node_state new_state
)
{
  Thread_Control *needs_help;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Start from a known configuration: both threads at priority 4. */
  change_priority(executing, 4, false);
  change_priority(other, 4, false);

  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      switch (new_state) {
        case SCHEDULER_SMP_NODE_SCHEDULED:
          /* Executing strictly wins; yield keeps it scheduled. */
          change_priority(executing, 2, false);
          change_priority(other, 3, false);
          break;
        case SCHEDULER_SMP_NODE_READY:
          /* Equal priorities; yield hands the processor to other. */
          change_priority(executing, 2, false);
          change_priority(other, 2, false);
          break;
        default:
          rtems_test_assert(0);
          break;
      }
      break;
    case SCHEDULER_SMP_NODE_READY:
      switch (new_state) {
        case SCHEDULER_SMP_NODE_SCHEDULED:
          /* A yield cannot promote a ready node to scheduled. */
          rtems_test_assert(0);
          break;
        case SCHEDULER_SMP_NODE_READY:
          /* Other strictly wins; executing stays ready. */
          change_priority(executing, 3, false);
          change_priority(other, 2, false);
          break;
        default:
          rtems_test_assert(0);
          break;
      }
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  needs_help = yield_op(executing, executing_node);
  rtems_test_assert(executing_node->state == new_state);

  if (start_state != new_state) {
    switch (start_state) {
      case SCHEDULER_SMP_NODE_SCHEDULED:
        rtems_test_assert(needs_help == executing);
        break;
      case SCHEDULER_SMP_NODE_READY:
        rtems_test_assert(needs_help == other);
        break;
      default:
        rtems_test_assert(0);
        break;
    }
  } else {
    rtems_test_assert(needs_help == NULL);
  }

  /* Restore the highest priority so the executing thread is scheduled. */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
419
/*
 * Exercise the scheduler yield operation for all valid state transitions.
 * The ready-to-scheduled combination is skipped since a yield can never
 * promote a ready node.
 */
static void test_yield_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;
  size_t j;

  task_id = start_task(2);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(states); ++j) {
      if (
        states[i] != SCHEDULER_SMP_NODE_READY
          || states[j] != SCHEDULER_SMP_NODE_SCHEDULED
      ) {
        test_case_yield_op(
          executing,
          executing_node,
          other,
          states[i],
          states[j]
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
456
/*
 * Invoke the scheduler's block operation directly for the thread, holding
 * the thread state lock and the scheduler lock.
 */
static void block_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  (*scheduler->Operations.block)(scheduler, thread, &scheduler_node->Base);

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );
}
475
/*
 * Invoke the scheduler's unblock operation directly for the thread, holding
 * the thread state lock and the scheduler lock.  Returns the thread that
 * needs help according to the operation, or NULL.
 */
static Thread_Control *unblock_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_Control *needs_help;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Scheduler_Get( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  needs_help = (*scheduler->Operations.unblock)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );

  return needs_help;
}
501
/*
 * One block/unblock operation test case: arrange the executing thread's
 * priority so that unblocking yields new_state, block it, unblock it, and
 * verify the resulting node state and needs-help thread.  Runs with thread
 * dispatching disabled.
 */
static void test_case_unblock_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state new_state
)
{
  Thread_Control *needs_help;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Priorities 2 and 4 bracket the helper task's priority 3. */
  switch (new_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 2, false);
      rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, false);
      rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_READY);
      break;
    default:
      rtems_test_assert(0);
      break;
  }

  block_op(executing, executing_node);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_BLOCKED);

  needs_help = unblock_op(executing, executing_node);
  rtems_test_assert(executing_node->state == new_state);

  switch (new_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      /* Unblocking displaced the other task from the processor. */
      rtems_test_assert(needs_help == other);
      break;
    case SCHEDULER_SMP_NODE_READY:
      /* The unblocked thread itself still needs a processor. */
      rtems_test_assert(needs_help == executing);
      break;
    default:
      rtems_test_assert(0);
      break;
  }

  /* Restore the highest priority so the executing thread is scheduled. */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
551
552static void test_unblock_op(void)
553{
554  rtems_status_code sc;
555  rtems_id task_id;
556  Thread_Control *executing;
557  Scheduler_SMP_Node *executing_node;
558  Thread_Control *other;
559  size_t i;
560
561  task_id = start_task(3);
562  executing = _Thread_Get_executing();
563  executing_node = get_scheduler_node(executing);
564
565  other = get_thread_by_id(task_id);
566
567  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
568    test_case_unblock_op(
569      executing,
570      executing_node,
571      other,
572      states[i]
573    );
574  }
575
576  sc = rtems_task_delete(task_id);
577  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
578}
579
/* Run all scheduler operation tests; executed on every processor. */
static void tests(void)
{
  test_change_priority();
  test_update_priority_op();
  test_yield_op();
  test_unblock_op();
}
587
/*
 * Body of the per-processor test tasks started by Init().  The argument is
 * the expected processor index; the actual index is recorded for Init() to
 * check after the barrier.
 */
static void test_task(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;

  tests();

  ctx->cpu_index[arg] = rtems_get_current_processor();

  barrier_wait(ctx);

  /* Park the task; it is deleted by Init(). */
  rtems_task_suspend(RTEMS_SELF);
  rtems_test_assert(0);
}
601
/* Report test completion for one processor. */
static void done(uint32_t cpu_index)
{
  printf("test done on processor %" PRIu32 "\n", cpu_index);
}
606
/*
 * Initialization task: starts one test task per secondary processor (each
 * bound to its own scheduler instance), runs the tests on processor 0
 * itself, synchronizes via the barrier, and verifies placement and resource
 * usage.
 */
static void Init(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  rtems_resource_snapshot snapshot;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu_index;

  TEST_BEGIN();

  /* Snapshot to verify all resources are released at the end. */
  rtems_resource_snapshot_take(&snapshot);

  sc = rtems_barrier_create(
    rtems_build_name('B', 'A', 'R', 'I'),
    RTEMS_BARRIER_AUTOMATIC_RELEASE,
    cpu_count,
    &ctx->barrier_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* One test task per secondary processor, on that processor's scheduler. */
  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    rtems_id scheduler_id;

    sc = rtems_task_create(
      rtems_build_name('T', 'A', 'S', 'K'),
      255,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->task_id[cpu_index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_scheduler_ident(SCHED_NAME(cpu_index), &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(ctx->task_id[cpu_index], scheduler_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(ctx->task_id[cpu_index], test_task, cpu_index);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* Run the same tests on processor 0. */
  tests();

  barrier_wait(ctx);

  sc = rtems_barrier_delete(ctx->barrier_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  done(0);

  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    sc = rtems_task_delete(ctx->task_id[cpu_index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* Each test task must have run on its assigned processor. */
    rtems_test_assert(ctx->cpu_index[cpu_index] == cpu_index);

    done(cpu_index);
  }

  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));

  TEST_END();
  rtems_test_exit(0);
}
673
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER

#define CONFIGURE_SMP_APPLICATION

#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_MAX

#define CONFIGURE_MAXIMUM_PRIORITY 255

/* Use three different SMP scheduler implementations, one per processor. */
#define CONFIGURE_SCHEDULER_PRIORITY_SMP
#define CONFIGURE_SCHEDULER_SIMPLE_SMP
#define CONFIGURE_SCHEDULER_PRIORITY_AFFINITY_SMP

#include <rtems/scheduler.h>

RTEMS_SCHEDULER_CONTEXT_PRIORITY_SMP(a, CONFIGURE_MAXIMUM_PRIORITY + 1);

RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(b);

RTEMS_SCHEDULER_CONTEXT_PRIORITY_AFFINITY_SMP(
  c,
  CONFIGURE_MAXIMUM_PRIORITY + 1
);

/* Scheduler names "   A", "   B", "   C" match SCHED_NAME(0..2). */
#define CONFIGURE_SCHEDULER_CONTROLS \
  RTEMS_SCHEDULER_CONTROL_PRIORITY_SMP(a, SCHED_NAME(0)), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(b, SCHED_NAME(1)), \
  RTEMS_SCHEDULER_CONTROL_PRIORITY_AFFINITY_SMP(c, SCHED_NAME(2))

/* Processor 0 is mandatory; processors 1 and 2 are used if present. */
#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
  RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
  RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)

#define CONFIGURE_MAXIMUM_TASKS 6
#define CONFIGURE_MAXIMUM_BARRIERS 1

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.