source: rtems/testsuites/smptests/smpatomic01/init.c @ f95fa387

Last change on this file was f95fa387, checked in by Sebastian Huber <sebastian.huber@…>, on 02/01/17 at 11:11:33

Remove CONFIGURE_SMP_APPLICATION

Enable the SMP support if CONFIGURE_SMP_MAXIMUM_PROCESSORS > 1.

Update #2893.

/*
 * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * Copyright (c) 2013 Deng Hengyi.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/atomic.h>
#include <rtems/score/smpbarrier.h>
#include <rtems.h>
#include <rtems/bsd.h>
#include <rtems/test.h>
#include <limits.h>
#include <string.h>

#include "tmacros.h"

const char rtems_test_name[] = "SMPATOMIC 1";

#define MS_PER_TICK 10

#define MASTER_PRIORITY 1

#define WORKER_PRIORITY 2

#define CPU_COUNT 32

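/*
 * Test context, shared by all workers.  The unused space ensures that
 * normal_value and second_value reside on different cache lines.
 */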
typedef struct {
  rtems_test_parallel_context base;
  Atomic_Ulong atomic_value;
  unsigned long per_worker_value[CPU_COUNT];
  unsigned long normal_value;
  char unused_space_for_cache_line_separation[128];
  unsigned long second_value;
  Atomic_Flag global_flag;
  SMP_barrier_Control barrier;
  SMP_barrier_State barrier_state[CPU_COUNT];
  sbintime_t load_trigger_time;
  sbintime_t load_change_time[CPU_COUNT];
  int load_count[CPU_COUNT];
  sbintime_t rmw_trigger_time;
  sbintime_t rmw_change_time[CPU_COUNT];
  int rmw_count[CPU_COUNT];
} smpatomic01_context;

static smpatomic01_context test_instance;

static rtems_interval test_duration(void)
{
  return rtems_clock_get_ticks_per_second();
}

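/*
 * Sums up the per-worker counters and checks that the sum matches the final
 * value of the shared variable (the atomic value or the normal value,
 * depending on the test case).
 */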
static void test_fini(
  smpatomic01_context *ctx,
  const char *test,
  bool atomic
)
{
  unsigned long expected_value = 0;
  unsigned long actual_value;
  size_t worker_index;

  printf("=== atomic %s test case ===\n", test);

  for (
    worker_index = 0;
    worker_index < ctx->base.worker_count;
    ++worker_index
  ) {
    unsigned long worker_value = ctx->per_worker_value[worker_index];

    expected_value += worker_value;

    printf(
      "worker %zu value: %lu\n",
      worker_index,
      worker_value
    );
  }

  if (atomic) {
    actual_value = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
  } else {
    actual_value = ctx->normal_value;
  }

  printf(
    "atomic value: expected = %lu, actual = %lu\n",
    expected_value,
    actual_value
  );

  rtems_test_assert(expected_value == actual_value);
}

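/*
 * Atomic fetch-and-add test case: each worker counts its own increments of
 * the shared atomic value, so the final value must equal the sum of the
 * per-worker counters.
 */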
static rtems_interval test_atomic_add_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  return test_duration();
}

static void test_atomic_add_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    ++counter;
    _Atomic_Fetch_add_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_add_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "add", true);
}

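/*
 * Atomic flag test case: the flag implements a test-and-set spin lock which
 * serializes the increments of the non-atomic normal value.
 */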
static rtems_interval test_atomic_flag_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Flag_clear(&ctx->global_flag, ATOMIC_ORDER_RELEASE);
  ctx->normal_value = 0;

  return test_duration();
}

static void test_atomic_flag_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    while (_Atomic_Flag_test_and_set(&ctx->global_flag, ATOMIC_ORDER_ACQUIRE)) {
      /* Wait */
    }

    ++counter;
    ++ctx->normal_value;

    _Atomic_Flag_clear(&ctx->global_flag, ATOMIC_ORDER_RELEASE);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_flag_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "flag", false);
}

static rtems_interval test_atomic_sub_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  return test_duration();
}

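/*
 * Atomic fetch-and-sub test case: the per-worker counters are decremented in
 * step with the shared atomic value, so both wrap around identically in
 * unsigned arithmetic and the sums in test_fini() still match.
 */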
static void test_atomic_sub_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    --counter;
    _Atomic_Fetch_sub_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_sub_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "sub", true);
}

static rtems_interval test_atomic_compare_exchange_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);
  ctx->normal_value = 0;

  return test_duration();
}

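/*
 * Compare-and-swap test case: the compare-exchange loop acquires a simple
 * spin lock (0 means free, 1 means locked) around the increment of the
 * non-atomic normal value; the release store unlocks it.
 */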
static void test_atomic_compare_exchange_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    bool success;

    do {
      unsigned long zero = 0;

      success = _Atomic_Compare_exchange_ulong(
        &ctx->atomic_value,
        &zero,
        1,
        ATOMIC_ORDER_ACQUIRE,
        ATOMIC_ORDER_RELAXED
      );
    } while (!success);

    ++counter;
    ++ctx->normal_value;

    _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_compare_exchange_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "compare exchange", false);
}

static rtems_interval test_atomic_or_and_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  return test_duration();
}

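/*
 * Atomic fetch-or/fetch-and test case: each worker toggles a bit of its own
 * in the shared value and uses the returned previous value to check that the
 * bit actually changed state with each operation.
 */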
static void test_atomic_or_and_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long the_bit = 1UL << worker_index;
  unsigned long current_bit = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    unsigned long previous;

    if (current_bit != 0) {
      previous = _Atomic_Fetch_and_ulong(
        &ctx->atomic_value,
        ~the_bit,
        ATOMIC_ORDER_RELAXED
      );
      current_bit = 0;
    } else {
      previous = _Atomic_Fetch_or_ulong(
        &ctx->atomic_value,
        the_bit,
        ATOMIC_ORDER_RELAXED
      );
      current_bit = the_bit;
    }

    rtems_test_assert((previous & the_bit) != current_bit);
  }

  ctx->per_worker_value[worker_index] = current_bit;
}

static void test_atomic_or_and_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "or/and", true);
}

static rtems_interval test_atomic_fence_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  ctx->normal_value = 0;
  ctx->second_value = 0;
  _Atomic_Fence(ATOMIC_ORDER_RELEASE);

  return test_duration();
}

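/*
 * Fence test case: the master worker writes the normal value before the
 * second value, separated by a release fence.  The other workers read the
 * two values in reverse order, separated by an acquire fence, so an observed
 * normal value must never lag behind the observed second value (a violation
 * would wrap around in the unsigned subtraction and trip the assertion).
 */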
static void test_atomic_fence_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  if (rtems_test_parallel_is_master_worker(worker_index)) {
    unsigned long counter = 0;

    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      ++counter;
      ctx->normal_value = counter;
      _Atomic_Fence(ATOMIC_ORDER_RELEASE);
      ctx->second_value = counter;
    }
  } else {
    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long n;
      unsigned long s;

      s = ctx->second_value;
      _Atomic_Fence(ATOMIC_ORDER_ACQUIRE);
      n = ctx->normal_value;

      rtems_test_assert(n - s < LONG_MAX);
    }
  }
}

static void test_atomic_fence_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  printf(
    "=== atomic fence test case ===\n"
    "normal value = %lu, second value = %lu\n",
    ctx->normal_value,
    ctx->second_value
  );
}

static rtems_interval test_atomic_store_load_rmw_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  _SMP_barrier_Control_initialize(&ctx->barrier);

  for (i = 0; i < active_workers; ++i) {
    _SMP_barrier_State_initialize(&ctx->barrier_state[i]);
  }

  return 0;
}

static sbintime_t now(void)
{
  struct bintime bt;

  rtems_bsd_binuptime(&bt);
  return bttosbt(bt);
}

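/*
 * All workers meet at the barrier and then spin until a trigger time set up
 * by the master worker.  Processor 0 changes the shared value and the other
 * processors record when they observe the change and how many load (or
 * read-modify-write) operations this took.
 */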
static void test_atomic_store_load_rmw_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  uint32_t cpu_self_index;
  sbintime_t t;
  int counter;

  if (rtems_test_parallel_is_master_worker(worker_index)) {
    rtems_status_code sc;

    sc = rtems_task_wake_after(1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    t = now();
    t += (MS_PER_TICK / 2) * SBT_1MS;
    ctx->load_trigger_time = t;
    t += MS_PER_TICK * SBT_1MS;
    ctx->rmw_trigger_time = t;
  }

  _Atomic_Fence(ATOMIC_ORDER_SEQ_CST);

  _SMP_barrier_Wait(
    &ctx->barrier,
    &ctx->barrier_state[worker_index],
    active_workers
  );

  /*
   * Use the physical processor index to observe timing differences introduced
   * by the system topology.
   */
  cpu_self_index = rtems_get_current_processor();

  /* Store release and load acquire test case */

  counter = 0;
  t = ctx->load_trigger_time;

  while (now() < t) {
    /* Wait */
  }

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELEASE);
  } else {
    while (_Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_ACQUIRE) == 0) {
      ++counter;
    }
  }

  ctx->load_change_time[cpu_self_index] = now();
  ctx->load_count[cpu_self_index] = counter;

  /* Read-modify-write test case */

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELAXED);
  }

  counter = 0;
  t = ctx->rmw_trigger_time;

  while (now() < t) {
    /* Wait */
  }

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
  } else {
    while (
      (_Atomic_Fetch_or_ulong(&ctx->atomic_value, 2, ATOMIC_ORDER_RELAXED) & 1)
        == 0
    ) {
      ++counter;
    }
  }

  ctx->rmw_change_time[cpu_self_index] = now();
  ctx->rmw_count[cpu_self_index] = counter;
}

static void test_atomic_store_load_rmw_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;
  struct bintime bt;
  struct timespec ts;

  printf("=== atomic store release and load acquire test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    bt = sbttobt(ctx->load_change_time[i] - ctx->load_trigger_time);
    bintime2timespec(&bt, &ts);
    printf(
      "processor %zu delta %lins, load count %i\n",
      i,
      ts.tv_nsec,
      ctx->load_count[i]
    );
  }

  printf("=== atomic read-modify-write test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    bt = sbttobt(ctx->rmw_change_time[i] - ctx->rmw_trigger_time);
    bintime2timespec(&bt, &ts);
    printf(
      "processor %zu delta %lins, read-modify-write count %i\n",
      i,
      ts.tv_nsec,
      ctx->rmw_count[i]
    );
  }
}

/*
 * See also Hans-J. Boehm, HP Laboratories,
 * "Can Seqlocks Get Along With Programming Language Memory Models?",
 * http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
 */

static rtems_interval test_seqlock_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  ctx->normal_value = 0;
  ctx->second_value = 0;
  _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);

  return test_duration();
}

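/*
 * A reader retries until it observes the same even sequence number before
 * and after reading the protected values.  The concluding fetch-and-add of
 * zero re-reads the sequence number via a read-modify-write, see the paper
 * referenced above.
 */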
static unsigned long seqlock_read(smpatomic01_context *ctx)
{
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    unsigned long seq0;
    unsigned long seq1;
    unsigned long a;
    unsigned long b;

    do {
      seq0 = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_ACQUIRE);

      a = ctx->normal_value;
      b = ctx->second_value;

      seq1 =
        _Atomic_Fetch_add_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);
    } while (seq0 != seq1 || seq0 % 2 != 0);

    ++counter;
    rtems_test_assert(a == b);
  }

  return counter;
}

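/*
 * Single writer seqlock test case: processor 0 is the only writer and bumps
 * the sequence number by two around each update, all other processors read.
 */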
static void test_single_writer_seqlock_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  uint32_t cpu_self_index;
  unsigned long counter;

  /*
   * Use the physical processor index to observe timing differences introduced
   * by the system topology.
   */
  cpu_self_index = rtems_get_current_processor();

  if (cpu_self_index == 0) {
    counter = 0;

    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long seq;

      seq = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
      _Atomic_Store_ulong(&ctx->atomic_value, seq + 1, ATOMIC_ORDER_RELAXED);

      /* There is no atomic store with acquire/release semantics */
      _Atomic_Fence(ATOMIC_ORDER_ACQ_REL);

      ++counter;
      ctx->normal_value = counter;
      ctx->second_value = counter;

      _Atomic_Store_ulong(&ctx->atomic_value, seq + 2, ATOMIC_ORDER_RELEASE);
    }
  } else {
    counter = seqlock_read(ctx);
  }

  ctx->per_worker_value[cpu_self_index] = counter;
}

static void test_single_writer_seqlock_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;

  printf("=== single writer seqlock test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    printf(
      "processor %zu count %lu\n",
      i,
      ctx->per_worker_value[i]
    );
  }
}

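/*
 * Multi writer seqlock test case: processors with an even index act as
 * writers and use compare-and-swap to make the sequence number odd, which
 * locks out the other writers during an update.
 */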
static void test_multi_writer_seqlock_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  uint32_t cpu_self_index;
  unsigned long counter;

  /*
   * Use the physical processor index to observe timing differences introduced
   * by the system topology.
   */
  cpu_self_index = rtems_get_current_processor();

  if (cpu_self_index % 2 == 0) {
    counter = 0;

    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long seq;

      do {
        seq = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
      } while (
        seq % 2 != 0
          || !_Atomic_Compare_exchange_ulong(
              &ctx->atomic_value,
              &seq,
              seq + 1,
              ATOMIC_ORDER_ACQ_REL,
              ATOMIC_ORDER_RELAXED
            )
      );

      ++counter;
      ctx->normal_value = counter;
      ctx->second_value = counter;

      _Atomic_Store_ulong(&ctx->atomic_value, seq + 2, ATOMIC_ORDER_RELEASE);
    }
  } else {
    counter = seqlock_read(ctx);
  }

  ctx->per_worker_value[cpu_self_index] = counter;
}

static void test_multi_writer_seqlock_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;

  printf("=== multi writer seqlock test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    printf(
      "processor %zu count %lu\n",
      i,
      ctx->per_worker_value[i]
    );
  }
}

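/* The jobs are executed in table order, one after another */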
static const rtems_test_parallel_job test_jobs[] = {
  {
    .init = test_atomic_add_init,
    .body = test_atomic_add_body,
    .fini = test_atomic_add_fini
  }, {
    .init = test_atomic_flag_init,
    .body = test_atomic_flag_body,
    .fini = test_atomic_flag_fini
  }, {
    .init = test_atomic_sub_init,
    .body = test_atomic_sub_body,
    .fini = test_atomic_sub_fini
  }, {
    .init = test_atomic_compare_exchange_init,
    .body = test_atomic_compare_exchange_body,
    .fini = test_atomic_compare_exchange_fini
  }, {
    .init = test_atomic_or_and_init,
    .body = test_atomic_or_and_body,
    .fini = test_atomic_or_and_fini
  }, {
    .init = test_atomic_fence_init,
    .body = test_atomic_fence_body,
    .fini = test_atomic_fence_fini
  }, {
    .init = test_atomic_store_load_rmw_init,
    .body = test_atomic_store_load_rmw_body,
    .fini = test_atomic_store_load_rmw_fini
  }, {
    .init = test_seqlock_init,
    .body = test_single_writer_seqlock_body,
    .fini = test_single_writer_seqlock_fini
  }, {
    .init = test_seqlock_init,
    .body = test_multi_writer_seqlock_body,
    .fini = test_multi_writer_seqlock_fini
  }
};

static void setup_worker(
  rtems_test_parallel_context *base,
  size_t worker_index,
  rtems_id worker_id
)
{
  rtems_status_code sc;
  rtems_task_priority prio;

  sc = rtems_task_set_priority(worker_id, WORKER_PRIORITY, &prio);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

static void Init(rtems_task_argument arg)
{
  smpatomic01_context *ctx = &test_instance;

  TEST_BEGIN();

  rtems_test_parallel(
    &ctx->base,
    setup_worker,
    &test_jobs[0],
    RTEMS_ARRAY_SIZE(test_jobs)
  );

  TEST_END();
  rtems_test_exit(0);
}

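/* Application configuration */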
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER

#define CONFIGURE_MICROSECONDS_PER_TICK (MS_PER_TICK * 1000)

#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT

#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_INIT_TASK_PRIORITY MASTER_PRIORITY
#define CONFIGURE_INIT_TASK_INITIAL_MODES RTEMS_DEFAULT_MODES
#define CONFIGURE_INIT_TASK_ATTRIBUTES RTEMS_DEFAULT_ATTRIBUTES

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>