source: rtems/testsuites/smptests/smpscheduler03/init.c @ ca1e546e

5
Last change on this file since ca1e546e was ca1e546e, checked in by Sebastian Huber <sebastian.huber@…>, on 02/02/17 at 15:24:05

score: Improve scheduler helping protocol

Only register ask for help requests in the scheduler unblock and yield
operations. The actual ask for help operation is carried out during
_Thread_Do_dispatch() on a processor related to the thread. This yields
a better separation of scheduler instances. A thread of one scheduler
instance should not be forced to carry out too much work for threads on
other scheduler instances.

Update #2556.

  • Property mode set to 100644
File size: 16.8 KB
Line 
1/*
2 * Copyright (c) 2014-2015 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#ifdef HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#include <stdio.h>
20#include <inttypes.h>
21
22#include <rtems.h>
23#include <rtems/libcsupport.h>
24#include <rtems/score/threadimpl.h>
25#include <rtems/score/schedulersmpimpl.h>
26
27#include "tmacros.h"
28
29const char rtems_test_name[] = "SMPSCHEDULER 3";
30
31#define CPU_MAX 3
32
33#define SCHED_NAME(i) rtems_build_name(' ', ' ', ' ', (char) ('A' + (i)))
34
35typedef struct {
36  rtems_id barrier_id;
37  rtems_id task_id[CPU_MAX];
38  uint32_t cpu_index[CPU_MAX];
39} test_context;
40
41static test_context test_instance;
42
43static Scheduler_SMP_Node *get_scheduler_node(Thread_Control *thread)
44{
45  return _Scheduler_SMP_Node_downcast(_Thread_Scheduler_get_home_node(thread));
46}
47
/*
 * Change the real priority of the thread to new_priority while holding the
 * thread wait lock.  The priority updates are only recorded in the queue
 * context; the caller is responsible for carrying them out afterwards, e.g.
 * via _Thread_Priority_update().
 */
static void apply_priority(
  Thread_Control *thread,
  Priority_Control new_priority,
  bool prepend_it,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Context_initialize(queue_context);
  _Thread_queue_Context_clear_priority_updates(queue_context);
  _Thread_Wait_acquire(thread, queue_context);
  _Thread_Priority_change(
    thread,
    &thread->Real_priority,
    new_priority,
    prepend_it,
    queue_context
  );
  _Thread_Wait_release(thread, queue_context);
}
67
68static void change_priority(
69  Thread_Control *thread,
70  Priority_Control new_priority,
71  bool prepend_it
72)
73{
74  Thread_queue_Context queue_context;
75
76  apply_priority(thread, new_priority, prepend_it, &queue_context);
77  _Thread_Priority_update(&queue_context);
78}
79
80static void barrier_wait(test_context *ctx)
81{
82  rtems_status_code sc;
83
84  sc = rtems_barrier_wait(ctx->barrier_id, RTEMS_NO_TIMEOUT);
85  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
86}
87
88static void task(rtems_task_argument arg)
89{
90  rtems_test_assert(0);
91}
92
93static rtems_id start_task(rtems_task_priority prio)
94{
95  rtems_status_code sc;
96  rtems_id task_id;
97
98  sc = rtems_task_create(
99    rtems_build_name('T', 'A', 'S', 'K'),
100    prio,
101    RTEMS_MINIMUM_STACK_SIZE,
102    RTEMS_DEFAULT_MODES,
103    RTEMS_DEFAULT_ATTRIBUTES,
104    &task_id
105  );
106  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
107
108  sc = rtems_task_start(task_id, task, 0);
109  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
110
111  return task_id;
112}
113
114static Thread_Control *get_thread_by_id(rtems_id task_id)
115{
116  ISR_lock_Context lock_context;
117  Thread_Control *thread;
118
119  thread = _Thread_Get(task_id, &lock_context);
120  rtems_test_assert(thread != NULL);
121  _ISR_lock_ISR_enable(&lock_context);
122
123  return thread;
124}
125
/*
 * One change-priority test case, run with thread dispatching disabled.
 * First bring the executing thread's scheduler node into start_state
 * (priority 1 keeps it scheduled; priority 4 makes it ready relative to the
 * priority 3 helper task created by the caller), then change the priority to
 * prio and verify the node reached new_state.  Finally restore priority 1 so
 * the thread is scheduled again for the next case.
 */
static void test_case_change_priority(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Scheduler_SMP_Node_state start_state,
  Priority_Control prio,
  bool prepend_it,
  Scheduler_SMP_Node_state new_state
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Establish the requested start state */
  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 1, true);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, true);
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  /* The actual operation under test */
  change_priority(executing, prio, prepend_it);
  rtems_test_assert(executing_node->state == new_state);

  /* Restore a defined state for the next test case */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
160
/* Scheduler node states used both as start states and expected new states. */
static const Scheduler_SMP_Node_state states[2] = {
  SCHEDULER_SMP_NODE_SCHEDULED,
  SCHEDULER_SMP_NODE_READY
};

/*
 * Priorities paired index-wise with states[]: priority 2 leaves the
 * executing thread scheduled, priority 5 makes it ready (the helper task
 * runs at priority 3), so the test loops use priorities[j] with states[j].
 */
static const Priority_Control priorities[2] = { 2, 5 };

/* Both variants of the prepend_it flag are exercised. */
static const bool prepend_it[2] = { true, false };
169
/*
 * Exercise priority changes via the high-level thread priority API for all
 * combinations of start state, new priority, and prepend flag.  A helper
 * task at priority 3 competes with the executing thread on the same
 * scheduler instance.  Note that the expected new state is states[j],
 * matching priorities[j] by construction (see the table comments).
 */
static void test_change_priority(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  size_t i;
  size_t j;
  size_t k;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) {
      for (k = 0; k < RTEMS_ARRAY_SIZE(prepend_it); ++k) {
        test_case_change_priority(
          executing,
          executing_node,
          states[i],
          priorities[j],
          prepend_it[k],
          states[j]
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
202
/*
 * Apply a new priority to the thread, then invoke the scheduler's
 * update_priority operation directly on the given scheduler node.  The
 * thread state lock and the scheduler lock of the thread's home scheduler
 * are held around the operation, mirroring the locking done by the thread
 * dispatch code.
 */
static void update_priority_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node,
  Priority_Control new_priority,
  bool prepend_it
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_queue_Context queue_context;

  apply_priority(thread, new_priority, prepend_it, &queue_context);

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Thread_Scheduler_get_home( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  (*scheduler->Operations.update_priority)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );
}
230
/*
 * One update_priority operation test case, run with thread dispatching
 * disabled.  Bring the executing thread into start_state, perform the raw
 * scheduler update_priority operation with the new priority, and verify the
 * resulting node state.  If the state changed, also verify the heir on this
 * processor: losing the processor makes the helper task the heir, gaining
 * it makes the executing thread the heir.
 */
static void test_case_update_priority_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state start_state,
  Priority_Control prio,
  bool prepend_it,
  Scheduler_SMP_Node_state new_state
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Establish the requested start state */
  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 1, true);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, true);
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  /* The operation under test */
  update_priority_op(executing, executing_node, prio, prepend_it);
  rtems_test_assert(executing_node->state == new_state);

  /* A state transition must have selected the corresponding heir */
  if (start_state != new_state) {
    switch (start_state) {
      case SCHEDULER_SMP_NODE_SCHEDULED:
        rtems_test_assert(cpu_self->heir == other);
        break;
      case SCHEDULER_SMP_NODE_READY:
        rtems_test_assert(cpu_self->heir == executing);
        break;
      default:
        rtems_test_assert(0);
        break;
    }
  }

  /* Restore a defined state for the next test case */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
280
/*
 * Exercise the raw scheduler update_priority operation for all combinations
 * of start state, new priority, and prepend flag.  A helper task at
 * priority 3 serves as the alternative heir.  As in test_change_priority(),
 * the expected new state is states[j], matching priorities[j].
 */
static void test_update_priority_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;
  size_t j;
  size_t k;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) {
      for (k = 0; k < RTEMS_ARRAY_SIZE(prepend_it); ++k) {
        test_case_update_priority_op(
          executing,
          executing_node,
          other,
          states[i],
          priorities[j],
          prepend_it[k],
          states[j]
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
317
/*
 * Invoke the scheduler's yield operation directly on the given scheduler
 * node, holding the thread state lock and the home scheduler lock as the
 * thread dispatch code would.
 */
static void yield_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Thread_Scheduler_get_home( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  (*scheduler->Operations.yield)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );
}
340
/*
 * One yield operation test case, run with thread dispatching disabled.
 * Arrange the priorities of the executing thread and the helper task so
 * that the executing thread's node is in start_state and a yield will leave
 * it in new_state, then perform the raw yield operation and verify the
 * resulting state.  The READY -> SCHEDULED transition is impossible for a
 * yield and is asserted against.  Afterwards restore priority 1 so the
 * executing thread is scheduled again.
 */
static void test_case_yield_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state start_state,
  Scheduler_SMP_Node_state new_state
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Start from a known equal-priority configuration */
  change_priority(executing, 4, false);
  change_priority(other, 4, false);

  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      switch (new_state) {
        case SCHEDULER_SMP_NODE_SCHEDULED:
          /* executing stays ahead of other even after the yield */
          change_priority(executing, 2, false);
          change_priority(other, 3, false);
          break;
        case SCHEDULER_SMP_NODE_READY:
          /* equal priorities: the yield hands the processor to other */
          change_priority(executing, 2, false);
          change_priority(other, 2, false);
          break;
        default:
          rtems_test_assert(0);
          break;
      }
      break;
    case SCHEDULER_SMP_NODE_READY:
      switch (new_state) {
        case SCHEDULER_SMP_NODE_SCHEDULED:
          /* a yield cannot promote a ready node */
          rtems_test_assert(0);
          break;
        case SCHEDULER_SMP_NODE_READY:
          change_priority(executing, 3, false);
          change_priority(other, 2, false);
          break;
        default:
          rtems_test_assert(0);
          break;
      }
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  /* The operation under test */
  yield_op(executing, executing_node);
  rtems_test_assert(executing_node->state == new_state);

  /* Sanity check: the node must be in a valid post-yield state */
  switch (new_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
    case SCHEDULER_SMP_NODE_READY:
      break;
    default:
      rtems_test_assert(0);
      break;
  }

  /* Restore a defined state for the next test case */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
409
410static void test_yield_op(void)
411{
412  rtems_status_code sc;
413  rtems_id task_id;
414  Thread_Control *executing;
415  Scheduler_SMP_Node *executing_node;
416  Thread_Control *other;
417  size_t i;
418  size_t j;
419
420  task_id = start_task(2);
421  executing = _Thread_Get_executing();
422  executing_node = get_scheduler_node(executing);
423
424  other = get_thread_by_id(task_id);
425
426  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
427    for (j = 0; j < RTEMS_ARRAY_SIZE(states); ++j) {
428      if (
429        states[i] != SCHEDULER_SMP_NODE_READY
430          || states[j] != SCHEDULER_SMP_NODE_SCHEDULED
431      ) {
432        test_case_yield_op(
433          executing,
434          executing_node,
435          other,
436          states[i],
437          states[j]
438        );
439      }
440    }
441  }
442
443  sc = rtems_task_delete(task_id);
444  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
445}
446
/*
 * Invoke the scheduler's block operation directly on the given scheduler
 * node, holding the thread state lock and the home scheduler lock as the
 * thread dispatch code would.
 */
static void block_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Thread_Scheduler_get_home( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  (*scheduler->Operations.block)(scheduler, thread, &scheduler_node->Base);

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );
}
465
/*
 * Invoke the scheduler's unblock operation directly on the given scheduler
 * node, holding the thread state lock and the home scheduler lock as the
 * thread dispatch code would.
 */
static void unblock_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Thread_Scheduler_get_home( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  (*scheduler->Operations.unblock)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );
}
488
489static void test_case_unblock_op(
490  Thread_Control *executing,
491  Scheduler_SMP_Node *executing_node,
492  Thread_Control *other,
493  Scheduler_SMP_Node_state new_state
494)
495{
496  Per_CPU_Control *cpu_self;
497
498  cpu_self = _Thread_Dispatch_disable();
499
500  switch (new_state) {
501    case SCHEDULER_SMP_NODE_SCHEDULED:
502      change_priority(executing, 2, false);
503      rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);
504      break;
505    case SCHEDULER_SMP_NODE_READY:
506      change_priority(executing, 4, false);
507      rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_READY);
508      break;
509    default:
510      rtems_test_assert(0);
511      break;
512  }
513
514  block_op(executing, executing_node);
515  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_BLOCKED);
516
517  unblock_op(executing, executing_node);
518  rtems_test_assert(executing_node->state == new_state);
519
520  switch (new_state) {
521    case SCHEDULER_SMP_NODE_SCHEDULED:
522    case SCHEDULER_SMP_NODE_READY:
523      break;
524    default:
525      rtems_test_assert(0);
526      break;
527  }
528
529  change_priority(executing, 1, true);
530  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);
531
532  _Thread_Dispatch_enable( cpu_self );
533}
534
/*
 * Exercise the raw scheduler block and unblock operations for both expected
 * post-unblock states.  A helper task at priority 3 competes with the
 * executing thread on the same scheduler instance.
 */
static void test_unblock_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    test_case_unblock_op(
      executing,
      executing_node,
      other,
      states[i]
    );
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
562
/* Run every scheduler operation test suite on the calling task. */
static void tests(void)
{
  test_change_priority();
  test_update_priority_op();
  test_yield_op();
  test_unblock_op();
}
570
/*
 * Entry point of the per-scheduler test tasks started by Init().  Runs the
 * full test suite on its own scheduler instance, records the processor it
 * executed on (arg is the expected processor index), synchronizes at the
 * barrier, and then suspends itself forever.
 */
static void test_task(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;

  tests();

  ctx->cpu_index[arg] = rtems_get_current_processor();

  barrier_wait(ctx);

  rtems_task_suspend(RTEMS_SELF);
  rtems_test_assert(0); /* must never resume */
}
584
/* Report successful completion of the test suite for one processor. */
static void done(uint32_t cpu_index)
{
  printf("test done on processor %" PRIu32 "\n", cpu_index);
}
589
/*
 * Test driver.  Creates one test task per additional processor, each bound
 * to its own scheduler instance (named via SCHED_NAME), runs the test suite
 * on processor 0 itself, synchronizes with the test tasks at the barrier,
 * and finally verifies that every task ran on its expected processor and
 * that no resources leaked.
 */
static void Init(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  rtems_resource_snapshot snapshot;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu_index;

  TEST_BEGIN();

  /* Baseline for the leak check at the end */
  rtems_resource_snapshot_take(&snapshot);

  sc = rtems_barrier_create(
    rtems_build_name('B', 'A', 'R', 'I'),
    RTEMS_BARRIER_AUTOMATIC_RELEASE,
    cpu_count,
    &ctx->barrier_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* One test task per secondary processor, each on its own scheduler */
  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    rtems_id scheduler_id;

    sc = rtems_task_create(
      rtems_build_name('T', 'A', 'S', 'K'),
      255,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->task_id[cpu_index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_scheduler_ident(SCHED_NAME(cpu_index), &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* Move the task to its scheduler before it starts executing */
    sc = rtems_task_set_scheduler(ctx->task_id[cpu_index], scheduler_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(ctx->task_id[cpu_index], test_task, cpu_index);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* Run the suite on processor 0 as well */
  tests();

  barrier_wait(ctx);

  sc = rtems_barrier_delete(ctx->barrier_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  done(0);

  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    sc = rtems_task_delete(ctx->task_id[cpu_index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* Each test task must have executed on its assigned processor */
    rtems_test_assert(ctx->cpu_index[cpu_index] == cpu_index);

    done(cpu_index);
  }

  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));

  TEST_END();
  rtems_test_exit(0);
}
656
/* Application configuration for confdefs.h */

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER

/* One processor per scheduler instance under test */
#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_MAX

#define CONFIGURE_MAXIMUM_PRIORITY 255

/* Three different SMP scheduler implementations are exercised */
#define CONFIGURE_SCHEDULER_PRIORITY_SMP
#define CONFIGURE_SCHEDULER_SIMPLE_SMP
#define CONFIGURE_SCHEDULER_PRIORITY_AFFINITY_SMP

#include <rtems/scheduler.h>

RTEMS_SCHEDULER_CONTEXT_PRIORITY_SMP(a, CONFIGURE_MAXIMUM_PRIORITY + 1);

RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(b);

RTEMS_SCHEDULER_CONTEXT_PRIORITY_AFFINITY_SMP(
  c,
  CONFIGURE_MAXIMUM_PRIORITY + 1
);

/* Scheduler instances A, B, C (see SCHED_NAME) */
#define CONFIGURE_SCHEDULER_CONTROLS \
  RTEMS_SCHEDULER_CONTROL_PRIORITY_SMP(a, SCHED_NAME(0)), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(b, SCHED_NAME(1)), \
  RTEMS_SCHEDULER_CONTROL_PRIORITY_AFFINITY_SMP(c, SCHED_NAME(2))

/* Processor 0 must exist; processors 1 and 2 are optional at boot */
#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
  RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
  RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)

#define CONFIGURE_MAXIMUM_TASKS 6
#define CONFIGURE_MAXIMUM_BARRIERS 1

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.