source: rtems/testsuites/smptests/smpatomic01/init.c

Last change on this file was bcef89f2, checked in by Sebastian Huber <sebastian.huber@…>, on 05/19/23 at 06:18:25

Update company name

The embedded brains GmbH & Co. KG is the legal successor of embedded
brains GmbH.

/*
 * Copyright (C) 2013, 2016 embedded brains GmbH & Co. KG
 *
 * Copyright (c) 2013 Deng Hengyi.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/atomic.h>
#include <rtems/score/smpbarrier.h>
#include <rtems.h>
#include <rtems/bsd.h>
#include <rtems/test-info.h>
#include <limits.h>
#include <string.h>

#include "tmacros.h"

const char rtems_test_name[] = "SMPATOMIC 1";

#define MS_PER_TICK 10

#define MASTER_PRIORITY 1

#define WORKER_PRIORITY 2

#define CPU_COUNT 32

typedef struct {
  rtems_test_parallel_context base;
  Atomic_Ulong atomic_value;
  unsigned long per_worker_value[CPU_COUNT];
  unsigned long normal_value;
  char unused_space_for_cache_line_separation[128];
  unsigned long second_value;
  Atomic_Flag global_flag;
  SMP_barrier_Control barrier;
  SMP_barrier_State barrier_state[CPU_COUNT];
  sbintime_t load_trigger_time;
  sbintime_t load_change_time[CPU_COUNT];
  int load_count[CPU_COUNT];
  sbintime_t rmw_trigger_time;
  sbintime_t rmw_change_time[CPU_COUNT];
  int rmw_count[CPU_COUNT];
} smpatomic01_context;

static smpatomic01_context test_instance;

static rtems_interval test_duration(void)
{
  return rtems_clock_get_ticks_per_second();
}
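
/*
 * Editorial note: the duration is given in clock ticks, so each timed job
 * below runs for roughly one second of wall-clock time.
 */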

static void test_fini(
  smpatomic01_context *ctx,
  const char *test,
  bool atomic
)
{
  unsigned long expected_value = 0;
  unsigned long actual_value;
  size_t worker_index;

  printf("=== atomic %s test case ===\n", test);

  for (
    worker_index = 0;
    worker_index < ctx->base.worker_count;
    ++worker_index
  ) {
    unsigned long worker_value = ctx->per_worker_value[worker_index];

    expected_value += worker_value;

    printf(
      "worker %zu value: %lu\n",
      worker_index,
      worker_value
    );
  }

  if (atomic) {
    actual_value = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
  } else {
    actual_value = ctx->normal_value;
  }

  printf(
    "atomic value: expected = %lu, actual = %lu\n",
    expected_value,
    actual_value
  );

  rtems_test_assert(expected_value == actual_value);
}

static rtems_interval test_atomic_add_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  return test_duration();
}

static void test_atomic_add_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    ++counter;
    _Atomic_Fetch_add_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_add_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "add", true);
}
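
/*
 * Editorial note: relaxed ordering is sufficient above because the test
 * only checks the final sum of all increments; the counter is not used to
 * publish any other data.  The C11 analogue is
 * atomic_fetch_add_explicit(&value, 1, memory_order_relaxed).
 */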

static rtems_interval test_atomic_flag_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Flag_clear(&ctx->global_flag, ATOMIC_ORDER_RELEASE);
  ctx->normal_value = 0;

  return test_duration();
}

static void test_atomic_flag_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    while (_Atomic_Flag_test_and_set(&ctx->global_flag, ATOMIC_ORDER_ACQUIRE)) {
      /* Wait */
    }

    ++counter;
    ++ctx->normal_value;

    _Atomic_Flag_clear(&ctx->global_flag, ATOMIC_ORDER_RELEASE);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_flag_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "flag", false);
}
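
/*
 * Editorial sketch, not part of the original test: the same test-and-set
 * spin lock expressed with C11 <stdatomic.h> for comparison.
 */
#if 0
#include <stdatomic.h>

static atomic_flag c11_lock = ATOMIC_FLAG_INIT;

static void c11_lock_acquire(void)
{
  while (atomic_flag_test_and_set_explicit(&c11_lock, memory_order_acquire)) {
    /* Busy wait until the flag owner clears it */
  }
}

static void c11_lock_release(void)
{
  atomic_flag_clear_explicit(&c11_lock, memory_order_release);
}
#endif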

static rtems_interval test_atomic_sub_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  return test_duration();
}

static void test_atomic_sub_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    --counter;
    _Atomic_Fetch_sub_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_sub_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "sub", true);
}

static rtems_interval test_atomic_compare_exchange_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);
  ctx->normal_value = 0;

  return test_duration();
}

static void test_atomic_compare_exchange_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    bool success;

    do {
      unsigned long zero = 0;

      success = _Atomic_Compare_exchange_ulong(
        &ctx->atomic_value,
        &zero,
        1,
        ATOMIC_ORDER_ACQUIRE,
        ATOMIC_ORDER_RELAXED
      );
    } while (!success);

    ++counter;
    ++ctx->normal_value;

    _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);
  }

  ctx->per_worker_value[worker_index] = counter;
}

static void test_atomic_compare_exchange_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "compare exchange", false);
}
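
/*
 * Editorial note: the loop above builds a lock from compare-and-swap
 * instead of test-and-set.  The expected value is redeclared as zero on
 * every retry because a failed compare-exchange stores the observed value
 * back into it.  The C11 analogue is
 * atomic_compare_exchange_weak_explicit().
 */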

static rtems_interval test_atomic_or_and_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  return test_duration();
}

static void test_atomic_or_and_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  unsigned long the_bit = 1UL << worker_index;
  unsigned long current_bit = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    unsigned long previous;

    if (current_bit != 0) {
      previous = _Atomic_Fetch_and_ulong(
        &ctx->atomic_value,
        ~the_bit,
        ATOMIC_ORDER_RELAXED
      );
      current_bit = 0;
    } else {
      previous = _Atomic_Fetch_or_ulong(
        &ctx->atomic_value,
        the_bit,
        ATOMIC_ORDER_RELAXED
      );
      current_bit = the_bit;
    }

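    /*
     * Editorial note: each worker toggles only its own bit, so the fetched
     * previous value must show that bit in the opposite state of the new
     * current_bit.
     */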
    rtems_test_assert((previous & the_bit) != current_bit);
  }

  ctx->per_worker_value[worker_index] = current_bit;
}

static void test_atomic_or_and_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  test_fini(ctx, "or/and", true);
}

static rtems_interval test_atomic_fence_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  ctx->normal_value = 0;
  ctx->second_value = 0;
  _Atomic_Fence(ATOMIC_ORDER_RELEASE);

  return test_duration();
}

static void test_atomic_fence_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  if (rtems_test_parallel_is_master_worker(worker_index)) {
    unsigned long counter = 0;

    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      ++counter;
      ctx->normal_value = counter;
      _Atomic_Fence(ATOMIC_ORDER_RELEASE);
      ctx->second_value = counter;
    }
  } else {
    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long n;
      unsigned long s;

      s = ctx->second_value;
      _Atomic_Fence(ATOMIC_ORDER_ACQUIRE);
      n = ctx->normal_value;

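      /*
       * Editorial note: the writer updates normal_value before
       * second_value with a release fence in between, and the reader reads
       * them in the opposite order across an acquire fence, so n can never
       * lag behind s.  In unsigned arithmetic, n - s < LONG_MAX asserts
       * n >= s even if the counters wrap around.
       */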
      rtems_test_assert(n - s < LONG_MAX);
    }
  }
}

static void test_atomic_fence_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  printf(
    "=== atomic fence test case ===\n"
    "normal value = %lu, second value = %lu\n",
    ctx->normal_value,
    ctx->second_value
  );
}

static rtems_interval test_atomic_store_load_rmw_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;

  _Atomic_Init_ulong(&ctx->atomic_value, 0);

  _SMP_barrier_Control_initialize(&ctx->barrier);

  for (i = 0; i < active_workers; ++i) {
    _SMP_barrier_State_initialize(&ctx->barrier_state[i]);
  }

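  /*
   * Editorial note: presumably, returning no duration is fine here because
   * this job does not poll rtems_test_parallel_stop_job(); it paces itself
   * with the barrier and the absolute trigger times set in the body.
   */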
  return 0;
}

static sbintime_t now(void)
{
  struct bintime bt;

  rtems_bsd_binuptime(&bt);
  return bttosbt(bt);
}

static void test_atomic_store_load_rmw_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  uint32_t cpu_self_index;
  sbintime_t t;
  int counter;

  if (rtems_test_parallel_is_master_worker(worker_index)) {
    rtems_status_code sc;

    sc = rtems_task_wake_after(1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    t = now();
    t += (MS_PER_TICK / 2) * SBT_1MS;
    ctx->load_trigger_time = t;
    t += MS_PER_TICK * SBT_1MS;
    ctx->rmw_trigger_time = t;
  }

  _Atomic_Fence(ATOMIC_ORDER_SEQ_CST);

  _SMP_barrier_Wait(
    &ctx->barrier,
    &ctx->barrier_state[worker_index],
    active_workers
  );

  /*
   * Use the physical processor index to observe timing differences
   * introduced by the system topology.
   */
  cpu_self_index = rtems_scheduler_get_processor();

  /* Store release and load acquire test case */

  counter = 0;
  t = ctx->load_trigger_time;

  while (now() < t) {
    /* Wait */
  }

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELEASE);
  } else {
    while (_Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_ACQUIRE) == 0) {
      ++counter;
    }
  }

  ctx->load_change_time[cpu_self_index] = now();
  ctx->load_count[cpu_self_index] = counter;

  /* Read-modify-write test case */

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELAXED);
  }

  counter = 0;
  t = ctx->rmw_trigger_time;

  while (now() < t) {
    /* Wait */
  }

  if (cpu_self_index == 0) {
    _Atomic_Store_ulong(&ctx->atomic_value, 1, ATOMIC_ORDER_RELAXED);
  } else {
    while (
      (_Atomic_Fetch_or_ulong(&ctx->atomic_value, 2, ATOMIC_ORDER_RELAXED) & 1)
        == 0
    ) {
      ++counter;
    }
  }

  ctx->rmw_change_time[cpu_self_index] = now();
  ctx->rmw_count[cpu_self_index] = counter;
}

static void test_atomic_store_load_rmw_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;
  struct bintime bt;
  struct timespec ts;

  printf("=== atomic store release and load acquire test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    bt = sbttobt(ctx->load_change_time[i] - ctx->load_trigger_time);
    bintime2timespec(&bt, &ts);
    printf(
      "processor %zu delta %lins, load count %i\n",
      i,
      ts.tv_nsec,
      ctx->load_count[i]
    );
  }

  printf("=== atomic read-modify-write test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    bt = sbttobt(ctx->rmw_change_time[i] - ctx->rmw_trigger_time);
    bintime2timespec(&bt, &ts);
    printf(
      "processor %zu delta %lins, read-modify-write count %i\n",
      i,
      ts.tv_nsec,
      ctx->rmw_count[i]
    );
  }
}

/*
 * See also Hans-J. Boehm, HP Laboratories,
 * "Can Seqlocks Get Along With Programming Language Memory Models?",
 * http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
 */
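
/*
 * Editorial note: a seqlock protects data with a sequence counter.  An
 * even counter value means no write is in progress; a writer makes the
 * counter odd before touching the data and even again afterwards.
 * Readers sample the counter before and after reading the data and retry
 * if the two samples differ or are odd.
 */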

static rtems_interval test_seqlock_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;

  ctx->normal_value = 0;
  ctx->second_value = 0;
  _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);

  return test_duration();
}

static unsigned long seqlock_read(smpatomic01_context *ctx)
{
  unsigned long counter = 0;

  while (!rtems_test_parallel_stop_job(&ctx->base)) {
    unsigned long seq0;
    unsigned long seq1;
    unsigned long a;
    unsigned long b;

    do {
      seq0 = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_ACQUIRE);

      a = ctx->normal_value;
      b = ctx->second_value;

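      /*
       * Editorial note: following the paper cited above, the sequence is
       * re-read with a read-modify-write of zero instead of a plain load,
       * since a load acquire would only order the accesses after it and
       * could let the data reads above drift past the check.
       */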
      seq1 =
        _Atomic_Fetch_add_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);
    } while (seq0 != seq1 || seq0 % 2 != 0);

    ++counter;
    rtems_test_assert(a == b);
  }

  return counter;
}

static void test_single_writer_seqlock_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  uint32_t cpu_self_index;
  unsigned long counter;

  /*
   * Use the physical processor index to observe timing differences
   * introduced by the system topology.
   */
  cpu_self_index = rtems_scheduler_get_processor();

  if (cpu_self_index == 0) {
    counter = 0;

    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long seq;

      seq = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
      _Atomic_Store_ulong(&ctx->atomic_value, seq + 1, ATOMIC_ORDER_RELAXED);

      /* There is no atomic store with acquire/release semantics */
      _Atomic_Fence(ATOMIC_ORDER_ACQ_REL);

      ++counter;
      ctx->normal_value = counter;
      ctx->second_value = counter;

      _Atomic_Store_ulong(&ctx->atomic_value, seq + 2, ATOMIC_ORDER_RELEASE);
    }
  } else {
    counter = seqlock_read(ctx);
  }

  ctx->per_worker_value[cpu_self_index] = counter;
}

static void test_single_writer_seqlock_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;

  printf("=== single writer seqlock test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    printf(
      "processor %zu count %lu\n",
      i,
      ctx->per_worker_value[i]
    );
  }
}
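
/*
 * Editorial sketch, not part of the original test: the single writer's
 * sequence above expressed with C11 <stdatomic.h> for comparison.
 */
#if 0
#include <stdatomic.h>

static atomic_ulong c11_seq;
static unsigned long c11_data_a;
static unsigned long c11_data_b;

static void c11_seqlock_write(unsigned long value)
{
  unsigned long seq = atomic_load_explicit(&c11_seq, memory_order_relaxed);

  /* Make the sequence odd to mark the write in progress */
  atomic_store_explicit(&c11_seq, seq + 1, memory_order_relaxed);
  atomic_thread_fence(memory_order_acq_rel);

  c11_data_a = value;
  c11_data_b = value;

  /* Make the sequence even again to publish the data */
  atomic_store_explicit(&c11_seq, seq + 2, memory_order_release);
}
#endif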

static void test_multi_writer_seqlock_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  uint32_t cpu_self_index;
  unsigned long counter;

  /*
   * Use the physical processor index to observe timing differences
   * introduced by the system topology.
   */
  cpu_self_index = rtems_scheduler_get_processor();

  if (cpu_self_index % 2 == 0) {
    counter = 0;

    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long seq;

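      /*
       * Editorial note: with multiple writers, the compare-exchange from
       * an even to an odd sequence value doubles as the writer lock; a
       * writer that loses the race simply retries.
       */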
      do {
        seq = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
      } while (
        seq % 2 != 0
          || !_Atomic_Compare_exchange_ulong(
              &ctx->atomic_value,
              &seq,
              seq + 1,
              ATOMIC_ORDER_ACQ_REL,
              ATOMIC_ORDER_RELAXED
            )
      );

      ++counter;
      ctx->normal_value = counter;
      ctx->second_value = counter;

      _Atomic_Store_ulong(&ctx->atomic_value, seq + 2, ATOMIC_ORDER_RELEASE);
    }
  } else {
    counter = seqlock_read(ctx);
  }

  ctx->per_worker_value[cpu_self_index] = counter;
}

static void test_multi_writer_seqlock_fini(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  smpatomic01_context *ctx = (smpatomic01_context *) base;
  size_t i;

  printf("=== multi writer seqlock test case ===\n");

  for (i = 0; i < active_workers; ++i) {
    printf(
      "processor %zu count %lu\n",
      i,
      ctx->per_worker_value[i]
    );
  }
}

static const rtems_test_parallel_job test_jobs[] = {
  {
    .init = test_atomic_add_init,
    .body = test_atomic_add_body,
    .fini = test_atomic_add_fini
  }, {
    .init = test_atomic_flag_init,
    .body = test_atomic_flag_body,
    .fini = test_atomic_flag_fini
  }, {
    .init = test_atomic_sub_init,
    .body = test_atomic_sub_body,
    .fini = test_atomic_sub_fini
  }, {
    .init = test_atomic_compare_exchange_init,
    .body = test_atomic_compare_exchange_body,
    .fini = test_atomic_compare_exchange_fini
  }, {
    .init = test_atomic_or_and_init,
    .body = test_atomic_or_and_body,
    .fini = test_atomic_or_and_fini
  }, {
    .init = test_atomic_fence_init,
    .body = test_atomic_fence_body,
    .fini = test_atomic_fence_fini
  }, {
    .init = test_atomic_store_load_rmw_init,
    .body = test_atomic_store_load_rmw_body,
    .fini = test_atomic_store_load_rmw_fini
  }, {
    .init = test_seqlock_init,
    .body = test_single_writer_seqlock_body,
    .fini = test_single_writer_seqlock_fini
  }, {
    .init = test_seqlock_init,
    .body = test_multi_writer_seqlock_body,
    .fini = test_multi_writer_seqlock_fini
  }
};

static void setup_worker(
  rtems_test_parallel_context *base,
  size_t worker_index,
  rtems_id worker_id
)
{
  rtems_status_code sc;
  rtems_task_priority prio;

  sc = rtems_task_set_priority(worker_id, WORKER_PRIORITY, &prio);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

static void Init(rtems_task_argument arg)
{
  smpatomic01_context *ctx = &test_instance;

  TEST_BEGIN();

  rtems_test_parallel(
    &ctx->base,
    setup_worker,
    &test_jobs[0],
    RTEMS_ARRAY_SIZE(test_jobs)
  );

  TEST_END();
  rtems_test_exit(0);
}

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER

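/* One clock tick is 10ms, consistent with MS_PER_TICK above */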
#define CONFIGURE_MICROSECONDS_PER_TICK (MS_PER_TICK * 1000)

#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT

#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_INIT_TASK_PRIORITY MASTER_PRIORITY
#define CONFIGURE_INIT_TASK_INITIAL_MODES RTEMS_DEFAULT_MODES
#define CONFIGURE_INIT_TASK_ATTRIBUTES RTEMS_DEFAULT_ATTRIBUTES

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>