source: rtems/testsuites/smptests/smpscheduler03/init.c @ 2dd098a

5
Last change on this file since 2dd098a was 2dd098a, checked in by Sebastian Huber <sebastian.huber@…>, on 10/31/16 at 07:33:11

score: Introduce Thread_Scheduler_control::home

Replace Thread_Scheduler_control::control and
Thread_Scheduler_control::own_control with new
Thread_Scheduler_control::home.

Update #2556.

  • Property mode set to 100644
File size: 17.3 KB
RevLine 
[c0bff5e]1/*
[33e30f39]2 * Copyright (c) 2014-2015 embedded brains GmbH.  All rights reserved.
[c0bff5e]3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#ifdef HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
[4962d24d]19#include <stdio.h>
20#include <inttypes.h>
21
[c0bff5e]22#include <rtems.h>
[4962d24d]23#include <rtems/libcsupport.h>
[c0bff5e]24#include <rtems/score/threadimpl.h>
25#include <rtems/score/schedulersmpimpl.h>
26
27#include "tmacros.h"
28
29const char rtems_test_name[] = "SMPSCHEDULER 3";
30
[4962d24d]31#define CPU_MAX 3
32
33#define SCHED_NAME(i) rtems_build_name(' ', ' ', ' ', (char) ('A' + (i)))
34
/* Shared state of the test: a barrier plus per-processor task bookkeeping. */
typedef struct {
  rtems_id barrier_id;          /* automatic-release barrier, one waiter per CPU */
  rtems_id task_id[CPU_MAX];    /* worker task created for each secondary CPU */
  uint32_t cpu_index[CPU_MAX];  /* processor index reported back by each worker */
} test_context;

static test_context test_instance;
42
[501043a]43static Scheduler_SMP_Node *get_scheduler_node(Thread_Control *thread)
44{
45  return _Scheduler_SMP_Node_downcast(_Thread_Scheduler_get_home_node(thread));
46}
47
[300f6a48]48static void apply_priority(
49  Thread_Control *thread,
50  Priority_Control new_priority,
51  bool prepend_it,
52  Thread_queue_Context *queue_context
[900d337f]53)
54{
[300f6a48]55  _Thread_queue_Context_clear_priority_updates(queue_context);
56  _Thread_Wait_acquire(thread, queue_context);
57  _Thread_Priority_change(
58    thread,
59    &thread->Real_priority,
60    new_priority,
61    prepend_it,
62    queue_context
63  );
64  _Thread_Wait_release(thread, queue_context);
[900d337f]65}
66
67static void change_priority(
[300f6a48]68  Thread_Control *thread,
69  Priority_Control new_priority,
70  bool prepend_it
[900d337f]71)
72{
[300f6a48]73  Thread_queue_Context queue_context;
74
75  apply_priority(thread, new_priority, prepend_it, &queue_context);
76  _Thread_Priority_update(&queue_context);
[900d337f]77}
78
[4962d24d]79static void barrier_wait(test_context *ctx)
80{
81  rtems_status_code sc;
82
83  sc = rtems_barrier_wait(ctx->barrier_id, RTEMS_NO_TIMEOUT);
84  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
85}
86
[c0bff5e]87static void task(rtems_task_argument arg)
88{
89  rtems_test_assert(0);
90}
91
[8568341]92static rtems_id start_task(rtems_task_priority prio)
93{
94  rtems_status_code sc;
95  rtems_id task_id;
96
97  sc = rtems_task_create(
98    rtems_build_name('T', 'A', 'S', 'K'),
99    prio,
100    RTEMS_MINIMUM_STACK_SIZE,
101    RTEMS_DEFAULT_MODES,
102    RTEMS_DEFAULT_ATTRIBUTES,
103    &task_id
104  );
105  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
106
107  sc = rtems_task_start(task_id, task, 0);
108  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
109
110  return task_id;
111}
112
113static Thread_Control *get_thread_by_id(rtems_id task_id)
114{
[5eac9676]115  ISR_lock_Context lock_context;
[8568341]116  Thread_Control *thread;
117
[e266d13]118  thread = _Thread_Get(task_id, &lock_context);
[5eac9676]119  rtems_test_assert(thread != NULL);
120  _ISR_lock_ISR_enable(&lock_context);
[8568341]121
122  return thread;
123}
124
/*
 * One change-priority scenario: drive the executing thread's scheduler node
 * into start_state, change its priority to prio, and check the node reaches
 * new_state.  Runs with thread dispatching disabled so the node state can be
 * observed without interference.
 */
static void test_case_change_priority(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Scheduler_SMP_Node_state start_state,
  Priority_Control prio,
  bool prepend_it,
  Scheduler_SMP_Node_state new_state
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Establish the requested start state: priority 1 keeps the executing
   * thread scheduled, priority 4 makes it ready (the helper task has
   * priority 3). */
  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 1, true);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, true);
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  /* The actual operation under test */
  change_priority(executing, prio, prepend_it);
  rtems_test_assert(executing_node->state == new_state);

  /* Restore the scheduled state for the next scenario */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
159
/* Node states exercised by the test scenarios below */
static const Scheduler_SMP_Node_state states[2] = {
  SCHEDULER_SMP_NODE_SCHEDULED,
  SCHEDULER_SMP_NODE_READY
};

/* Paired with states[] by index: priority 2 yields SCHEDULED, 5 yields READY
 * relative to the helper task's priority of 3 */
static const Priority_Control priorities[2] = { 2, 5 };

/* Both prepend and append variants of the priority change are exercised */
static const bool prepend_it[2] = { true, false };
168
/*
 * Exercises every combination of start state, new priority, and
 * prepend/append for a plain priority change of the executing thread, with a
 * priority 3 helper task as the scheduler peer.  Note that states[j] is the
 * expected new state matching priorities[j] (see the paired tables above).
 */
static void test_change_priority(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  size_t i;
  size_t j;
  size_t k;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) {
      for (k = 0; k < RTEMS_ARRAY_SIZE(prepend_it); ++k) {
        test_case_change_priority(
          executing,
          executing_node,
          states[i],
          priorities[j],
          prepend_it[k],
          states[j]
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
201
/*
 * Invokes the home scheduler's update_priority operation directly after
 * changing the thread's real priority.  The operation is called under the
 * thread state lock and the scheduler lock, mirroring the locking protocol of
 * the scheduler operation table.  NOTE(review): the priority updates recorded
 * in queue_context are deliberately not processed here — only the raw
 * scheduler operation is under test.
 */
static void update_priority_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node,
  Priority_Control new_priority,
  bool prepend_it
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_queue_Context queue_context;

  apply_priority(thread, new_priority, prepend_it, &queue_context);

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Thread_Scheduler_get_home( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  (*scheduler->Operations.update_priority)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );
}
229
/*
 * One update_priority operation scenario: drive the node into start_state,
 * call the scheduler's update_priority operation with the new priority, and
 * verify the resulting node state and, on a state transition, the heir
 * selected for this processor.
 */
static void test_case_update_priority_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state start_state,
  Priority_Control prio,
  bool prepend_it,
  Scheduler_SMP_Node_state new_state
)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Priority 1 keeps the executing thread scheduled, priority 4 makes it
   * ready relative to the priority 3 helper task */
  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 1, true);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, true);
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  update_priority_op(executing, executing_node, prio, prepend_it);
  rtems_test_assert(executing_node->state == new_state);

  /* On a state transition the processor's heir must have flipped to the
   * thread that is now scheduled */
  if (start_state != new_state) {
    switch (start_state) {
      case SCHEDULER_SMP_NODE_SCHEDULED:
        rtems_test_assert(cpu_self->heir == other);
        break;
      case SCHEDULER_SMP_NODE_READY:
        rtems_test_assert(cpu_self->heir == executing);
        break;
      default:
        rtems_test_assert(0);
        break;
    }
  }

  /* Restore the scheduled state for the next scenario */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
279
/*
 * Exercises the scheduler update_priority operation for every combination of
 * start state, new priority, and prepend/append, using a priority 3 helper
 * task as the competing thread.  states[j] is the expected new state paired
 * with priorities[j].
 */
static void test_update_priority_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;
  size_t j;
  size_t k;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(priorities); ++j) {
      for (k = 0; k < RTEMS_ARRAY_SIZE(prepend_it); ++k) {
        test_case_update_priority_op(
          executing,
          executing_node,
          other,
          states[i],
          priorities[j],
          prepend_it[k],
          states[j]
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
316
/*
 * Invokes the home scheduler's yield operation under the thread state lock
 * and the scheduler lock.  Returns the thread that needs help according to
 * the operation, or NULL.
 */
static Thread_Control *yield_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_Control *needs_help;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Thread_Scheduler_get_home( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  needs_help = (*scheduler->Operations.yield)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );

  return needs_help;
}
342
/*
 * One yield operation scenario: arrange the priorities of the executing
 * thread and the helper thread so that the executing thread's node starts in
 * start_state, perform the yield, and verify the new node state and the
 * needs-help thread returned by the operation.
 */
static void test_case_yield_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state start_state,
  Scheduler_SMP_Node_state new_state
)
{
  Thread_Control *needs_help;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Park both threads at a neutral priority before setting up the case */
  change_priority(executing, 4, false);
  change_priority(other, 4, false);

  /* Pick priorities so that the yield produces the desired transition:
   * equal priorities let the yield demote the executing thread, a strictly
   * higher priority keeps it scheduled.  READY -> SCHEDULED cannot be
   * produced by a yield and is rejected below. */
  switch (start_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      switch (new_state) {
        case SCHEDULER_SMP_NODE_SCHEDULED:
          change_priority(executing, 2, false);
          change_priority(other, 3, false);
          break;
        case SCHEDULER_SMP_NODE_READY:
          change_priority(executing, 2, false);
          change_priority(other, 2, false);
          break;
        default:
          rtems_test_assert(0);
          break;
      }
      break;
    case SCHEDULER_SMP_NODE_READY:
      switch (new_state) {
        case SCHEDULER_SMP_NODE_SCHEDULED:
          rtems_test_assert(0);
          break;
        case SCHEDULER_SMP_NODE_READY:
          change_priority(executing, 3, false);
          change_priority(other, 2, false);
          break;
        default:
          rtems_test_assert(0);
          break;
      }
      break;
    default:
      rtems_test_assert(0);
      break;
  }
  rtems_test_assert(executing_node->state == start_state);

  needs_help = yield_op(executing, executing_node);
  rtems_test_assert(executing_node->state == new_state);

  /* On a state change the demoted/still-ready thread needs help; otherwise
   * the yield must report nothing */
  if (start_state != new_state) {
    switch (start_state) {
      case SCHEDULER_SMP_NODE_SCHEDULED:
        rtems_test_assert(needs_help == executing);
        break;
      case SCHEDULER_SMP_NODE_READY:
        rtems_test_assert(needs_help == other);
        break;
      default:
        rtems_test_assert(0);
        break;
    }
  } else {
    rtems_test_assert(needs_help == NULL);
  }

  /* Restore the scheduled state for the next scenario */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
419
/*
 * Exercises the scheduler yield operation for all reachable state
 * transitions; READY -> SCHEDULED is skipped because a yield cannot promote
 * a ready thread.
 */
static void test_yield_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;
  size_t j;

  task_id = start_task(2);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    for (j = 0; j < RTEMS_ARRAY_SIZE(states); ++j) {
      if (
        states[i] != SCHEDULER_SMP_NODE_READY
          || states[j] != SCHEDULER_SMP_NODE_SCHEDULED
      ) {
        test_case_yield_op(
          executing,
          executing_node,
          other,
          states[i],
          states[j]
        );
      }
    }
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
456
/*
 * Invokes the home scheduler's block operation under the thread state lock
 * and the scheduler lock.
 */
static void block_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Thread_Scheduler_get_home( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  (*scheduler->Operations.block)(scheduler, thread, &scheduler_node->Base);

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );
}
475
/*
 * Invokes the home scheduler's unblock operation under the thread state lock
 * and the scheduler lock.  Returns the thread that needs help according to
 * the operation, or NULL.
 */
static Thread_Control *unblock_op(
  Thread_Control *thread,
  Scheduler_SMP_Node *scheduler_node
)
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context state_lock_context;
  ISR_lock_Context scheduler_lock_context;
  Thread_Control *needs_help;

  _Thread_State_acquire( thread, &state_lock_context );
  scheduler = _Thread_Scheduler_get_home( thread );
  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );

  needs_help = (*scheduler->Operations.unblock)(
    scheduler,
    thread,
    &scheduler_node->Base
  );

  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
  _Thread_State_release( thread, &state_lock_context );

  return needs_help;
}
501
/*
 * One block/unblock scenario: set the executing thread's priority so that
 * unblocking will land it in new_state, block it via the scheduler block
 * operation, unblock it again, and verify the node state and the needs-help
 * thread after the unblock.
 */
static void test_case_unblock_op(
  Thread_Control *executing,
  Scheduler_SMP_Node *executing_node,
  Thread_Control *other,
  Scheduler_SMP_Node_state new_state
)
{
  Thread_Control *needs_help;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  /* Priority 2 beats the priority 3 helper (scheduled after unblock),
   * priority 4 loses to it (ready after unblock) */
  switch (new_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      change_priority(executing, 2, false);
      rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);
      break;
    case SCHEDULER_SMP_NODE_READY:
      change_priority(executing, 4, false);
      rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_READY);
      break;
    default:
      rtems_test_assert(0);
      break;
  }

  block_op(executing, executing_node);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_BLOCKED);

  needs_help = unblock_op(executing, executing_node);
  rtems_test_assert(executing_node->state == new_state);

  /* The displaced thread (or the still-ready executing thread) needs help */
  switch (new_state) {
    case SCHEDULER_SMP_NODE_SCHEDULED:
      rtems_test_assert(needs_help == other);
      break;
    case SCHEDULER_SMP_NODE_READY:
      rtems_test_assert(needs_help == executing);
      break;
    default:
      rtems_test_assert(0);
      break;
  }

  /* Restore the scheduled state for the next scenario */
  change_priority(executing, 1, true);
  rtems_test_assert(executing_node->state == SCHEDULER_SMP_NODE_SCHEDULED);

  _Thread_Dispatch_enable( cpu_self );
}
551
/*
 * Exercises the scheduler block/unblock operations for both possible states
 * after the unblock, using a priority 3 helper task as the competing thread.
 */
static void test_unblock_op(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  Thread_Control *executing;
  Scheduler_SMP_Node *executing_node;
  Thread_Control *other;
  size_t i;

  task_id = start_task(3);
  executing = _Thread_Get_executing();
  executing_node = get_scheduler_node(executing);

  other = get_thread_by_id(task_id);

  for (i = 0; i < RTEMS_ARRAY_SIZE(states); ++i) {
    test_case_unblock_op(
      executing,
      executing_node,
      other,
      states[i]
    );
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
579
/* Runs all scheduler operation test suites on the calling processor. */
static void tests(void)
{
  test_change_priority();
  test_update_priority_op();
  test_yield_op();
  test_unblock_op();
}
587
/*
 * Entry point of the per-processor worker tasks: run the full test suite on
 * this processor's scheduler, report the processor index, meet the other
 * processors at the barrier, and then suspend forever.
 */
static void test_task(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;

  tests();

  ctx->cpu_index[arg] = rtems_get_current_processor();

  barrier_wait(ctx);

  rtems_task_suspend(RTEMS_SELF);
  rtems_test_assert(0);
}
601
/* Reports completion of the test suite on the given processor. */
static void done(uint32_t cpu)
{
  printf("test done on processor %" PRIu32 "\n", cpu);
}
606
/*
 * Initialization task: creates the barrier and one worker task per secondary
 * processor (each bound to its own scheduler instance), runs the test suite
 * on processor 0, synchronizes with the workers, and finally checks that all
 * resources were released and each worker ran on its expected processor.
 */
static void Init(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  rtems_resource_snapshot snapshot;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu_index;
  
  TEST_BEGIN();

  /* Snapshot resources so leaks by the test are detected at the end */
  rtems_resource_snapshot_take(&snapshot);

  sc = rtems_barrier_create(
    rtems_build_name('B', 'A', 'R', 'I'),
    RTEMS_BARRIER_AUTOMATIC_RELEASE,
    cpu_count,
    &ctx->barrier_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    rtems_id scheduler_id;

    /* Created at the lowest priority; the real priority is set via
     * rtems_task_set_scheduler() below */
    sc = rtems_task_create(
      rtems_build_name('T', 'A', 'S', 'K'),
      255,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->task_id[cpu_index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* Bind the worker to the scheduler instance of its processor */
    sc = rtems_scheduler_ident(SCHED_NAME(cpu_index), &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(ctx->task_id[cpu_index], scheduler_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(ctx->task_id[cpu_index], test_task, cpu_index);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* Run the suite on processor 0 as well */
  tests();

  barrier_wait(ctx);

  sc = rtems_barrier_delete(ctx->barrier_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  done(0);

  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    sc = rtems_task_delete(ctx->task_id[cpu_index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* Each worker must have executed on its assigned processor */
    rtems_test_assert(ctx->cpu_index[cpu_index] == cpu_index);

    done(cpu_index);
  }

  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));

  TEST_END();
  rtems_test_exit(0);
}
673
674#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
675#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
676
677#define CONFIGURE_SMP_APPLICATION
678
[4962d24d]679#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_MAX
680
681#define CONFIGURE_MAXIMUM_PRIORITY 255
682
683#define CONFIGURE_SCHEDULER_PRIORITY_SMP
684#define CONFIGURE_SCHEDULER_SIMPLE_SMP
685#define CONFIGURE_SCHEDULER_PRIORITY_AFFINITY_SMP
686
687#include <rtems/scheduler.h>
688
689RTEMS_SCHEDULER_CONTEXT_PRIORITY_SMP(a, CONFIGURE_MAXIMUM_PRIORITY + 1);
690
691RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(b);
692
693RTEMS_SCHEDULER_CONTEXT_PRIORITY_AFFINITY_SMP(
694  c,
695  CONFIGURE_MAXIMUM_PRIORITY + 1
696);
697
698#define CONFIGURE_SCHEDULER_CONTROLS \
699  RTEMS_SCHEDULER_CONTROL_PRIORITY_SMP(a, SCHED_NAME(0)), \
700  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(b, SCHED_NAME(1)), \
701  RTEMS_SCHEDULER_CONTROL_PRIORITY_AFFINITY_SMP(c, SCHED_NAME(2))
702
703#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
704  RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
705  RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
706  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
[c0bff5e]707
[4962d24d]708#define CONFIGURE_MAXIMUM_TASKS 6
709#define CONFIGURE_MAXIMUM_BARRIERS 1
[c0bff5e]710
711#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
712
713#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
714
715#define CONFIGURE_INIT
716
717#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.