source: rtems/testsuites/smptests/smplock01/init.c

Last change on this file was 4f6f5f46, checked in by Sebastian Huber <sebastian.huber@…>, on 01/09/24 at 09:27:39

smplock01: Convert to JSON data

This avoids a dependency on the non-standard libxml2 module.

  • Property mode set to 100644
File size: 17.9 KB
Line 
1/* SPDX-License-Identifier: BSD-2-Clause */
2
3/*
4 * Copyright (C) 2013, 2024 embedded brains GmbH & Co. KG
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#ifdef HAVE_CONFIG_H
29#include "config.h"
30#endif
31
32#include <rtems/score/smplock.h>
33#include <rtems/score/smplockmcs.h>
34#include <rtems/score/smplockseq.h>
35#include <rtems/test-info.h>
36#include <rtems.h>
37
38#include "tmacros.h"
39
const char rtems_test_name[] = "SMPLOCK 1";

/* Priority of the initialization task (workers are created by the
 * parallel test framework). */
#define TASK_PRIORITY 1

/* Upper bound on the processor count supported by this test; sizes the
 * per-processor result arrays in test_context. */
#define CPU_COUNT 32

/* Number of entries in test_jobs[]; each test_<i>_body/fini pair below
 * corresponds to one index. */
#define TEST_COUNT 13
47
/*
 * State shared by all workers of all test jobs.  The contended
 * synchronization objects are each aligned to a cache line boundary so
 * that lock traffic does not falsely share lines with the counters or
 * with each other.
 */
typedef struct {
  /* Parallel test framework context; must be the first member since the
   * bodies cast rtems_test_parallel_context * back to test_context *. */
  rtems_test_parallel_context base;
  /* Separator emitted before each test's JSON object ("" then ", ") */
  const char *test_sep;
  /* Separator emitted before each per-worker-count result record */
  const char *counter_sep;
  /* Global counter per test, incremented inside the lock sections */
  unsigned long counter[TEST_COUNT];
  /* Per-worker loop counts, indexed [active_workers - 1][test][worker] */
  unsigned long local_counter[CPU_COUNT][TEST_COUNT][CPU_COUNT];
  /* Shared ticket lock used by the "global" ticket lock tests */
  SMP_lock_Control lock RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
  /* Shared flag for the TAS/TTAS spin lock tests */
  Atomic_Uint flag RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
  /* Shared MCS lock used by the "global" MCS lock tests */
  SMP_MCS_lock_Control mcs_lock RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
#if defined(RTEMS_PROFILING)
  /* Statistics for the global MCS lock (profiling builds only) */
  SMP_lock_Stats mcs_stats;
#endif
  /* Sequence lock exercised by test 10 */
  SMP_sequence_lock_Control seq_lock RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
  /* Two values written together under the sequence lock; readers assert
   * they always observe them equal */
  int a RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
  int b RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
} test_context;
64
/*
 * The single shared test context.  The global lock objects are
 * statically initialized so no runtime setup is required before the
 * jobs run.
 */
static test_context test_instance = {
  .lock = SMP_LOCK_INITIALIZER("global ticket"),
#if defined(RTEMS_PROFILING)
  /* Statistics object is only present in profiling builds */
  .mcs_stats = SMP_LOCK_STATS_INITIALIZER("global MCS"),
#endif
  .flag = ATOMIC_INITIALIZER_UINT(0),
  .mcs_lock = SMP_MCS_LOCK_INITIALIZER,
  .seq_lock = SMP_SEQUENCE_LOCK_INITIALIZER
};
74
/* Duration of each test job: one second worth of clock ticks. */
static rtems_interval test_duration(void)
{
  return rtems_clock_get_ticks_per_second();
}
79
/*
 * Common job init handler: all jobs simply run for test_duration()
 * ticks.  The parameters are required by the framework's job interface
 * and are intentionally unused here.
 */
static rtems_interval test_init(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers
)
{
  return test_duration();
}
88
/* Forward declaration: test_fini() reads the cascade flag from the job
 * table, which is defined after all job functions. */
static const rtems_test_parallel_job test_jobs[TEST_COUNT];
90
/*
 * Emits the JSON record for one test at one active worker count.
 *
 * For cascading jobs this function is called once per worker count; the
 * test object is opened on the first call (active_workers == 1) and
 * closed on the last (active_workers == processor maximum).
 * Non-cascading jobs open and close the object in a single call.  The
 * test_sep/counter_sep members of ctx carry the separator state between
 * calls so the emitted JSON stays well-formed.
 *
 * ctx          - shared test context holding counters and separators
 * lock_type    - value for the "lock-type" JSON attribute
 * global_lock  - selects "global" or "local" for "lock-object"
 * section_type - value for the "section-type" JSON attribute
 * test         - index of the test in test_jobs[] and the counter arrays
 * active_workers - number of workers that executed the job body
 */
static void test_fini(
  test_context *ctx,
  const char *lock_type,
  bool global_lock,
  const char *section_type,
  size_t test,
  size_t active_workers
)
{
  bool cascade = test_jobs[test].cascade;
  unsigned long sum = 0;
  const char *value_sep;
  size_t i;

  /* First call for this test: open the JSON object and results array */
  if (active_workers == 1 || !cascade) {
    printf(
      "%s{\n"
      "    \"lock-type\": \"%s\",\n"
      "    \"lock-object\": \"%s\",\n"
      "    \"section-type\": \"%s\",\n"
      "    \"results\": [",
      ctx->test_sep,
      lock_type,
      global_lock ? "global" : "local",
      section_type
    );
    ctx->test_sep = ", ";
    ctx->counter_sep = "\n      ";
  }

  printf(
    "%s{\n"
    "        \"counter\": [", ctx->counter_sep);
  /* Subsequent records must first close the previous one */
  ctx->counter_sep = "\n      }, ";
  value_sep = "";

  /* Emit each worker's loop count and accumulate the total */
  for (i = 0; i < active_workers; ++i) {
    unsigned long local_counter =
      ctx->local_counter[active_workers - 1][test][i];

    sum += local_counter;

    printf(
      "%s%lu",
      value_sep,
      local_counter
    );
    value_sep = ", ";
  }

  printf(
    "],\n"
    "        \"global-counter\": %lu,\n"
    "        \"sum-of-local-counter\": %lu",
    ctx->counter[test],
    sum
  );

  /* Last call for this test: close the results array and the object */
  if (active_workers == rtems_scheduler_get_processor_maximum() || !cascade) {
    printf("\n      }\n    ]\n  }");
  }
}
153
154static void test_0_body(
155  rtems_test_parallel_context *base,
156  void *arg,
157  size_t active_workers,
158  size_t worker_index
159)
160{
161  test_context *ctx = (test_context *) base;
162  size_t test = 0;
163  unsigned long counter = 0;
164  SMP_lock_Context lock_context;
165
166  while (!rtems_test_parallel_stop_job(&ctx->base)) {
167    _SMP_lock_Acquire(&ctx->lock, &lock_context);
168    _SMP_lock_Release(&ctx->lock, &lock_context);
169    ++counter;
170  }
171
172  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
173}
174
175static void test_0_fini(
176  rtems_test_parallel_context *base,
177  void *arg,
178  size_t active_workers
179)
180{
181  test_context *ctx = (test_context *) base;
182
183  test_fini(
184    ctx,
185    "Ticket Lock",
186    true,
187    "local counter",
188    0,
189    active_workers
190  );
191}
192
193static void test_1_body(
194  rtems_test_parallel_context *base,
195  void *arg,
196  size_t active_workers,
197  size_t worker_index
198)
199{
200  test_context *ctx = (test_context *) base;
201  size_t test = 1;
202  unsigned long counter = 0;
203  SMP_MCS_lock_Context lock_context;
204
205  while (!rtems_test_parallel_stop_job(&ctx->base)) {
206    _SMP_MCS_lock_Acquire(&ctx->mcs_lock, &lock_context, &ctx->mcs_stats);
207    _SMP_MCS_lock_Release(&ctx->mcs_lock, &lock_context);
208    ++counter;
209  }
210
211  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
212}
213
214static void test_1_fini(
215  rtems_test_parallel_context *base,
216  void *arg,
217  size_t active_workers
218)
219{
220  test_context *ctx = (test_context *) base;
221
222  test_fini(
223    ctx,
224    "MCS Lock",
225    true,
226    "local counter",
227    1,
228    active_workers
229  );
230}
231
232static void test_2_body(
233  rtems_test_parallel_context *base,
234  void *arg,
235  size_t active_workers,
236  size_t worker_index
237)
238{
239  test_context *ctx = (test_context *) base;
240  size_t test = 2;
241  unsigned long counter = 0;
242  SMP_lock_Context lock_context;
243
244  while (!rtems_test_parallel_stop_job(&ctx->base)) {
245    _SMP_lock_Acquire(&ctx->lock, &lock_context);
246    ++ctx->counter[test];
247    _SMP_lock_Release(&ctx->lock, &lock_context);
248    ++counter;
249  }
250
251  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
252}
253
254static void test_2_fini(
255  rtems_test_parallel_context *base,
256  void *arg,
257  size_t active_workers
258)
259{
260  test_context *ctx = (test_context *) base;
261
262  test_fini(
263    ctx,
264    "Ticket Lock",
265    true,
266    "global counter",
267    2,
268    active_workers
269  );
270}
271
272static void test_3_body(
273  rtems_test_parallel_context *base,
274  void *arg,
275  size_t active_workers,
276  size_t worker_index
277)
278{
279  test_context *ctx = (test_context *) base;
280  size_t test = 3;
281  unsigned long counter = 0;
282  SMP_MCS_lock_Context lock_context;
283
284  while (!rtems_test_parallel_stop_job(&ctx->base)) {
285    _SMP_MCS_lock_Acquire(&ctx->mcs_lock, &lock_context, &ctx->mcs_stats);
286    ++ctx->counter[test];
287    _SMP_MCS_lock_Release(&ctx->mcs_lock, &lock_context);
288    ++counter;
289  }
290
291  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
292}
293
294static void test_3_fini(
295  rtems_test_parallel_context *base,
296  void *arg,
297  size_t active_workers
298)
299{
300  test_context *ctx = (test_context *) base;
301
302  test_fini(
303    ctx,
304    "MCS Lock",
305    true,
306    "global counter",
307    3,
308    active_workers
309  );
310}
311
312static void test_4_body(
313  rtems_test_parallel_context *base,
314  void *arg,
315  size_t active_workers,
316  size_t worker_index
317)
318{
319  test_context *ctx = (test_context *) base;
320  size_t test = 4;
321  unsigned long counter = 0;
322  SMP_lock_Control lock;
323  SMP_lock_Context lock_context;
324
325  _SMP_lock_Initialize(&lock, "local");
326
327  while (!rtems_test_parallel_stop_job(&ctx->base)) {
328    _SMP_lock_Acquire(&lock, &lock_context);
329    _SMP_lock_Release(&lock, &lock_context);
330    ++counter;
331  }
332
333  _SMP_lock_Destroy(&lock);
334
335  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
336}
337
338static void test_4_fini(
339  rtems_test_parallel_context *base,
340  void *arg,
341  size_t active_workers
342)
343{
344  test_context *ctx = (test_context *) base;
345
346  test_fini(
347    ctx,
348    "Ticket Lock",
349    false,
350    "local counter",
351    4,
352    active_workers
353  );
354}
355
356static void test_5_body(
357  rtems_test_parallel_context *base,
358  void *arg,
359  size_t active_workers,
360  size_t worker_index
361)
362{
363  test_context *ctx = (test_context *) base;
364  size_t test = 5;
365  unsigned long counter = 0;
366#if defined(RTEMS_PROFILING)
367  SMP_lock_Stats stats;
368#endif
369  SMP_MCS_lock_Control lock;
370  SMP_MCS_lock_Context lock_context;
371
372  _SMP_lock_Stats_initialize(&stats, "local");
373  _SMP_MCS_lock_Initialize(&lock);
374
375  while (!rtems_test_parallel_stop_job(&ctx->base)) {
376    _SMP_MCS_lock_Acquire(&lock, &lock_context, &stats);
377    _SMP_MCS_lock_Release(&lock, &lock_context);
378    ++counter;
379  }
380
381  _SMP_MCS_lock_Destroy(&lock);
382  _SMP_lock_Stats_destroy(&stats);
383
384  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
385}
386
387static void test_5_fini(
388  rtems_test_parallel_context *base,
389  void *arg,
390  size_t active_workers
391)
392{
393  test_context *ctx = (test_context *) base;
394
395  test_fini(
396    ctx,
397    "MCS Lock",
398    false,
399    "local counter",
400    5,
401    active_workers
402  );
403}
404
405static void test_6_body(
406  rtems_test_parallel_context *base,
407  void *arg,
408  size_t active_workers,
409  size_t worker_index
410)
411{
412  test_context *ctx = (test_context *) base;
413  size_t test = 6;
414  unsigned long counter = 0;
415  SMP_lock_Control lock;
416  SMP_lock_Context lock_context;
417
418  _SMP_lock_Initialize(&lock, "local");
419
420  while (!rtems_test_parallel_stop_job(&ctx->base)) {
421    _SMP_lock_Acquire(&lock, &lock_context);
422
423    /* The counter value is not interesting, only the access to it */
424    ++ctx->counter[test];
425
426    _SMP_lock_Release(&lock, &lock_context);
427    ++counter;
428  }
429
430  _SMP_lock_Destroy(&lock);
431
432  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
433}
434
435static void test_6_fini(
436  rtems_test_parallel_context *base,
437  void *arg,
438  size_t active_workers
439)
440{
441  test_context *ctx = (test_context *) base;
442
443  test_fini(
444    ctx,
445    "Ticket Lock",
446    false,
447    "global counter",
448    6,
449    active_workers
450  );
451}
452
453static void test_7_body(
454  rtems_test_parallel_context *base,
455  void *arg,
456  size_t active_workers,
457  size_t worker_index
458)
459{
460  test_context *ctx = (test_context *) base;
461  size_t test = 7;
462  unsigned long counter = 0;
463#if defined(RTEMS_PROFILING)
464  SMP_lock_Stats stats;
465#endif
466  SMP_MCS_lock_Control lock;
467  SMP_MCS_lock_Context lock_context;
468
469  _SMP_lock_Stats_initialize(&stats, "local");
470  _SMP_MCS_lock_Initialize(&lock);
471
472  while (!rtems_test_parallel_stop_job(&ctx->base)) {
473    _SMP_MCS_lock_Acquire(&lock, &lock_context, &stats);
474
475    /* The counter value is not interesting, only the access to it */
476    ++ctx->counter[test];
477
478    _SMP_MCS_lock_Release(&lock, &lock_context);
479    ++counter;
480  }
481
482  _SMP_MCS_lock_Destroy(&lock);
483  _SMP_lock_Stats_destroy(&stats);
484
485  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
486}
487
488static void test_7_fini(
489  rtems_test_parallel_context *base,
490  void *arg,
491  size_t active_workers
492)
493{
494  test_context *ctx = (test_context *) base;
495
496  test_fini(
497    ctx,
498    "MCS Lock",
499    false,
500    "global counter",
501    7,
502    active_workers
503  );
504}
505
/*
 * Simulates a non-trivial critical section by executing 101 compiler
 * memory barriers; the barriers keep the loop from being optimized
 * away.
 */
static void busy_section(void)
{
  int n;

  for (n = 101; n > 0; --n) {
    RTEMS_COMPILER_MEMORY_BARRIER();
  }
}
514
515static void test_8_body(
516  rtems_test_parallel_context *base,
517  void *arg,
518  size_t active_workers,
519  size_t worker_index
520)
521{
522  test_context *ctx = (test_context *) base;
523  size_t test = 8;
524  unsigned long counter = 0;
525  SMP_lock_Context lock_context;
526
527  while (!rtems_test_parallel_stop_job(&ctx->base)) {
528    _SMP_lock_Acquire(&ctx->lock, &lock_context);
529    busy_section();
530    _SMP_lock_Release(&ctx->lock, &lock_context);
531    ++counter;
532  }
533
534  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
535}
536
537static void test_8_fini(
538  rtems_test_parallel_context *base,
539  void *arg,
540  size_t active_workers
541)
542{
543  test_context *ctx = (test_context *) base;
544
545  test_fini(
546    ctx,
547    "Ticket Lock",
548    true,
549    "busy loop",
550    8,
551    active_workers
552  );
553}
554
555static void test_9_body(
556  rtems_test_parallel_context *base,
557  void *arg,
558  size_t active_workers,
559  size_t worker_index
560)
561{
562  test_context *ctx = (test_context *) base;
563  size_t test = 9;
564  unsigned long counter = 0;
565  SMP_MCS_lock_Context lock_context;
566
567  while (!rtems_test_parallel_stop_job(&ctx->base)) {
568    _SMP_MCS_lock_Acquire(&ctx->mcs_lock, &lock_context, &ctx->mcs_stats);
569    busy_section();
570    _SMP_MCS_lock_Release(&ctx->mcs_lock, &lock_context);
571    ++counter;
572  }
573
574  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
575}
576
577static void test_9_fini(
578  rtems_test_parallel_context *base,
579  void *arg,
580  size_t active_workers
581)
582{
583  test_context *ctx = (test_context *) base;
584
585  test_fini(
586    ctx,
587    "MCS Lock",
588    true,
589    "busy loop",
590    9,
591    active_workers
592  );
593}
594
/*
 * Test 10: sequence lock with a single writer and concurrent readers.
 *
 * The master worker repeatedly writes the pair (a, b) inside a write
 * section; all other workers read the pair inside a read/retry section
 * and assert that both values are always observed equal, i.e. no torn
 * read slipped past the sequence number check.
 */
static void test_10_body(
  rtems_test_parallel_context *base,
  void *arg,
  size_t active_workers,
  size_t worker_index
)
{
  test_context *ctx = (test_context *) base;
  size_t test = 10;
  unsigned long counter = 0;
  unsigned long seq;

  if (rtems_test_parallel_is_master_worker(worker_index)) {
    /* Writer: both fields are updated between write begin/end so a
     * consistent reader can never see them differ */
    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      seq = _SMP_sequence_lock_Write_begin(&ctx->seq_lock);

      ctx->a = counter;
      ctx->b = counter;

      _SMP_sequence_lock_Write_end(&ctx->seq_lock, seq);

      ++counter;
    }
  } else {
    /* Readers: retry the read section until the sequence number is
     * unchanged across it */
    while (!rtems_test_parallel_stop_job(&ctx->base)) {
      unsigned long a;
      unsigned long b;

      do {
        seq = _SMP_sequence_lock_Read_begin(&ctx->seq_lock);

        a = ctx->a;
        b = ctx->b;

      } while (_SMP_sequence_lock_Read_retry(&ctx->seq_lock, seq));

      ++counter;
      /* A successful read section must observe a consistent pair */
      rtems_test_assert(a == b);
    }
  }

  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
}
638
639static void test_10_fini(
640  rtems_test_parallel_context *base,
641  void *arg,
642  size_t active_workers
643)
644{
645  test_context *ctx = (test_context *) base;
646
647  test_fini(
648    ctx,
649    "Sequence Lock",
650    true,
651    "two global counter",
652    10,
653    active_workers
654  );
655}
656
657static void test_11_body(
658  rtems_test_parallel_context *base,
659  void *arg,
660  size_t active_workers,
661  size_t worker_index
662)
663{
664  test_context *ctx = (test_context *) base;
665  size_t test = 11;
666  unsigned long counter = 0;
667
668  while (!rtems_test_parallel_stop_job(&ctx->base)) {
669    while (_Atomic_Exchange_uint(&ctx->flag, 1, ATOMIC_ORDER_ACQUIRE) != 0) {
670      /* Wait */
671    }
672
673    _Atomic_Store_uint(&ctx->flag, 0, ATOMIC_ORDER_RELEASE);
674    ++counter;
675  }
676
677  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
678}
679
680static void test_11_fini(
681  rtems_test_parallel_context *base,
682  void *arg,
683  size_t active_workers
684)
685{
686  test_context *ctx = (test_context *) base;
687
688  test_fini(
689    ctx,
690    "TAS Lock",
691    true,
692    "local counter",
693    11,
694    active_workers
695  );
696}
697
698static void test_12_body(
699  rtems_test_parallel_context *base,
700  void *arg,
701  size_t active_workers,
702  size_t worker_index
703)
704{
705  test_context *ctx = (test_context *) base;
706  size_t test = 12;
707  unsigned long counter = 0;
708
709  while (!rtems_test_parallel_stop_job(&ctx->base)) {
710    while (_Atomic_Exchange_uint(&ctx->flag, 1, ATOMIC_ORDER_ACQUIRE) != 0) {
711      while (_Atomic_Load_uint(&ctx->flag, ATOMIC_ORDER_RELAXED) != 0) {
712        /* Wait */
713      }
714    }
715
716    _Atomic_Store_uint(&ctx->flag, 0, ATOMIC_ORDER_RELEASE);
717    ++counter;
718  }
719
720  ctx->local_counter[active_workers - 1][test][worker_index] = counter;
721}
722
723static void test_12_fini(
724  rtems_test_parallel_context *base,
725  void *arg,
726  size_t active_workers
727)
728{
729  test_context *ctx = (test_context *) base;
730
731  test_fini(
732    ctx,
733    "TTAS Lock",
734    true,
735    "local counter",
736    12,
737    active_workers
738  );
739}
740
/*
 * Job table run by rtems_test_parallel(); index N maps to
 * test_N_body/test_N_fini.  Jobs with .cascade = true are apparently
 * re-run for increasing active worker counts (test_fini() keys its JSON
 * open/close on active_workers == 1 and the processor maximum) — see
 * rtems/test-info.h for the exact cascade semantics.
 */
static const rtems_test_parallel_job test_jobs[TEST_COUNT] = {
  {
    .init = test_init,
    .body = test_0_body,
    .fini = test_0_fini,
    .cascade = true
  }, {
    .init = test_init,
    .body = test_1_body,
    .fini = test_1_fini,
    .cascade = true
  }, {
    .init = test_init,
    .body = test_2_body,
    .fini = test_2_fini,
    .cascade = false
  }, {
    .init = test_init,
    .body = test_3_body,
    .fini = test_3_fini,
    .cascade = false
  }, {
    .init = test_init,
    .body = test_4_body,
    .fini = test_4_fini,
    .cascade = true
  }, {
    .init = test_init,
    .body = test_5_body,
    .fini = test_5_fini,
    .cascade = true
  }, {
    .init = test_init,
    .body = test_6_body,
    .fini = test_6_fini,
    .cascade = false
  }, {
    .init = test_init,
    .body = test_7_body,
    .fini = test_7_fini,
    .cascade = false
  }, {
    .init = test_init,
    .body = test_8_body,
    .fini = test_8_fini,
    .cascade = false
  }, {
    .init = test_init,
    .body = test_9_body,
    .fini = test_9_fini,
    .cascade = false
  }, {
    .init = test_init,
    .body = test_10_body,
    .fini = test_10_fini,
    .cascade = false
  }, {
    .init = test_init,
    .body = test_11_body,
    .fini = test_11_fini,
    .cascade = true
  }, {
    .init = test_init,
    .body = test_12_body,
    .fini = test_12_fini,
    .cascade = true
  }
};
809
810static void test(void)
811{
812  test_context *ctx = &test_instance;
813
814  printf("*** BEGIN OF JSON DATA ***\n[\n  ");
815  ctx->test_sep = "";
816  rtems_test_parallel(&ctx->base, NULL, &test_jobs[0], TEST_COUNT);
817  printf("\n]\n*** END OF JSON DATA ***\n");
818}
819
820static void Init(rtems_task_argument arg)
821{
822  TEST_BEGIN();
823
824  test();
825
826  TEST_END();
827  rtems_test_exit(0);
828}
829
/* Application configuration consumed by <rtems/confdefs.h> */

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER

/* Let the test use up to CPU_COUNT processors and one task per processor */
#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT

/* Resources presumably used by the parallel test support — verify
 * against rtems_test_parallel() requirements before changing */
#define CONFIGURE_MAXIMUM_SEMAPHORES 1

#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_INIT_TASK_PRIORITY TASK_PRIORITY
#define CONFIGURE_INIT_TASK_INITIAL_MODES RTEMS_DEFAULT_MODES
#define CONFIGURE_INIT_TASK_ATTRIBUTES RTEMS_DEFAULT_ATTRIBUTES

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

/* Must be defined in exactly one translation unit before confdefs.h */
#define CONFIGURE_INIT

#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.