source: rtems/testsuites/tmtests/tmfine01/init.c @ 9de8d61

Last change on this file since 9de8d61 was 9de8d61, checked in by Sebastian Huber <sebastian.huber@…>, on 07/17/20 at 11:36:49

libtest: <rtems/test.h> to <rtems/test-info.h>

Rename this header file to later move <t.h> to <rtems/test.h>. The main
feature provided by <rtems/test-info.h> is the output of standard test
information which is consumed by the RTEMS Tester.

Update #3199.

  • Property mode set to 100644
File size: 18.0 KB
Line 
1/*
2 * Copyright (c) 2015, 2017 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#ifdef HAVE_CONFIG_H
16#include "config.h"
17#endif
18
19#include "tmacros.h"
20
21#include <sys/lock.h>
22#include <inttypes.h>
23#include <pthread.h>
24#include <sched.h>
25#include <stdio.h>
26
27#include <rtems/test-info.h>
28
/* Test name emitted by the RTEMS test output extension (TEST_BEGIN/TEST_END). */
const char rtems_test_name[] = "TMFINE 1";

#if defined(RTEMS_SMP)
#define CPU_COUNT 32
#else
#define CPU_COUNT 1
#endif

/* Capacity of each Classic API message queue created in Init(). */
#define MSG_COUNT 3

/* Fixed-size payload exchanged through the message queues. */
typedef struct {
  uint32_t value;
} test_msg;

/*
 * Shared state of all parallel jobs.  Each *_ops matrix stores the number of
 * operations performed per worker: the first index is the active worker count
 * minus one, the second index is the worker index.
 */
typedef struct {
  rtems_test_parallel_context base; /* must be first: bodies cast base back to test_context */
  rtems_id master;                  /* identifier of the initialization task */
  rtems_id sema;                    /* shared mutex used by the OneMutex job */
  rtems_id mq[CPU_COUNT];           /* one message queue per potential worker */
  uint32_t self_event_ops[CPU_COUNT][CPU_COUNT];
  uint32_t all_to_one_event_ops[CPU_COUNT][CPU_COUNT];
  uint32_t one_mutex_ops[CPU_COUNT][CPU_COUNT];
  uint32_t many_mutex_ops[CPU_COUNT][CPU_COUNT];
  uint32_t self_msg_ops[CPU_COUNT][CPU_COUNT];
  uint32_t many_to_one_msg_ops[CPU_COUNT][CPU_COUNT];
  uint32_t many_sys_lock_mutex_ops[CPU_COUNT][CPU_COUNT];
  uint32_t many_classic_ceiling_ops[CPU_COUNT][CPU_COUNT];
  uint32_t many_classic_mrsp_ops[CPU_COUNT][CPU_COUNT];
  uint32_t many_pthread_spinlock_ops[CPU_COUNT][CPU_COUNT];
  uint32_t many_pthread_mutex_inherit_ops[CPU_COUNT][CPU_COUNT];
  uint32_t many_pthread_mutex_protect_ops[CPU_COUNT][CPU_COUNT];
} test_context;
61
62static test_context test_instance;
63
/* Duration of one job run: one second expressed in clock ticks. */
static rtems_interval test_duration(void)
{
  return rtems_clock_get_ticks_per_second();
}
68
69static rtems_interval test_init(
70  rtems_test_parallel_context *base,
71  void *arg,
72  size_t active_workers
73)
74{
75  return test_duration();
76}
77
/*
 * Print the per-worker operation counters of one job as an XML-like
 * element named after the job.
 */
static void test_fini(
  const char *name,
  uint32_t *counters,
  size_t active_workers
)
{
  size_t worker = 0;

  printf("  <%s activeWorker=\"%zu\">\n", name, active_workers);

  while (worker < active_workers) {
    printf(
      "    <Counter worker=\"%zu\">%" PRIu32 "</Counter>\n",
      worker,
      counters[worker]
    );
    ++worker;
  }

  printf("  </%s>\n", name);
}
98
99static void test_self_event_body(
100  rtems_test_parallel_context *base,
101  void *arg,
102  size_t active_workers,
103  size_t worker_index
104)
105{
106  test_context *ctx = (test_context *) base;
107  rtems_id id = rtems_task_self();
108  uint32_t counter = 0;
109
110  while (!rtems_test_parallel_stop_job(&ctx->base)) {
111    rtems_status_code sc;
112    rtems_event_set out;
113
114    ++counter;
115
116    sc = rtems_event_send(id, RTEMS_EVENT_0);
117    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
118
119    sc = rtems_event_receive(
120      RTEMS_EVENT_0,
121      RTEMS_WAIT | RTEMS_EVENT_ANY,
122      RTEMS_NO_TIMEOUT,
123      &out
124    );
125    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
126  }
127
128  ctx->self_event_ops[active_workers - 1][worker_index] = counter;
129}
130
131static void test_self_event_fini(
132  rtems_test_parallel_context *base,
133  void *arg,
134  size_t active_workers
135)
136{
137  test_context *ctx = (test_context *) base;
138
139  test_fini(
140    "SelfEvent",
141    &ctx->self_event_ops[active_workers - 1][0],
142    active_workers
143  );
144}
145
146static void test_all_to_one_event_body(
147  rtems_test_parallel_context *base,
148  void *arg,
149  size_t active_workers,
150  size_t worker_index
151)
152{
153  test_context *ctx = (test_context *) base;
154  rtems_id id = rtems_task_self();
155  bool is_master = rtems_test_parallel_is_master_worker(worker_index);
156  uint32_t counter = 0;
157
158  while (!rtems_test_parallel_stop_job(&ctx->base)) {
159    rtems_status_code sc;
160
161    ++counter;
162
163    sc = rtems_event_send(id, RTEMS_EVENT_0);
164    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
165
166    if (is_master) {
167      rtems_event_set out;
168
169      sc = rtems_event_receive(
170        RTEMS_ALL_EVENTS,
171        RTEMS_WAIT | RTEMS_EVENT_ANY,
172        RTEMS_NO_TIMEOUT,
173        &out
174      );
175      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
176    }
177  }
178
179  ctx->all_to_one_event_ops[active_workers - 1][worker_index] = counter;
180}
181
182static void test_all_to_one_event_fini(
183  rtems_test_parallel_context *base,
184  void *arg,
185  size_t active_workers
186)
187{
188  test_context *ctx = (test_context *) base;
189
190  test_fini(
191    "AllToOneEvent",
192    &ctx->all_to_one_event_ops[active_workers - 1][0],
193    active_workers
194  );
195}
196
197static void test_one_mutex_body(
198  rtems_test_parallel_context *base,
199  void *arg,
200  size_t active_workers,
201  size_t worker_index
202)
203{
204  test_context *ctx = (test_context *) base;
205  rtems_id id = ctx->sema;
206  uint32_t counter = 0;
207
208  while (!rtems_test_parallel_stop_job(&ctx->base)) {
209    rtems_status_code sc;
210
211    ++counter;
212
213    sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
214    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
215
216    sc = rtems_semaphore_release(id);
217    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
218  }
219
220  ctx->one_mutex_ops[active_workers - 1][worker_index] = counter;
221}
222
223static void test_one_mutex_fini(
224  rtems_test_parallel_context *base,
225  void *arg,
226  size_t active_workers
227)
228{
229  test_context *ctx = (test_context *) base;
230
231  test_fini(
232    "OneMutex",
233    &ctx->one_mutex_ops[active_workers - 1][0],
234    active_workers
235  );
236}
237
238static void test_many_mutex_body(
239  rtems_test_parallel_context *base,
240  void *arg,
241  size_t active_workers,
242  size_t worker_index
243)
244{
245  test_context *ctx = (test_context *) base;
246  rtems_status_code sc;
247  rtems_id id;
248  uint32_t counter = 0;
249
250  sc = rtems_semaphore_create(
251    rtems_build_name('T', 'E', 'S', 'T'),
252    1,
253    RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY | RTEMS_PRIORITY,
254    0,
255    &id
256  );
257  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
258
259  while (!rtems_test_parallel_stop_job(&ctx->base)) {
260    rtems_status_code sc;
261
262    ++counter;
263
264    sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
265    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
266
267    sc = rtems_semaphore_release(id);
268    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
269  }
270
271  ctx->many_mutex_ops[active_workers - 1][worker_index] = counter;
272
273  sc = rtems_semaphore_delete(id);
274  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
275}
276
277static void test_many_mutex_fini(
278  rtems_test_parallel_context *base,
279  void *arg,
280  size_t active_workers
281)
282{
283  test_context *ctx = (test_context *) base;
284
285  test_fini(
286    "ManyMutex",
287    &ctx->many_mutex_ops[active_workers - 1][0],
288    active_workers
289  );
290}
291
292static void test_self_msg_body(
293  rtems_test_parallel_context *base,
294  void *arg,
295  size_t active_workers,
296  size_t worker_index
297)
298{
299  test_context *ctx = (test_context *) base;
300  rtems_id id = ctx->mq[worker_index];
301  uint32_t counter = 0;
302
303  while (!rtems_test_parallel_stop_job(&ctx->base)) {
304    rtems_status_code sc;
305    test_msg msg = { .value = 0 };
306    size_t n;
307
308    ++counter;
309
310    sc = rtems_message_queue_send(id, &msg, sizeof(msg));
311    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_TOO_MANY);
312
313    n = sizeof(msg);
314    sc = rtems_message_queue_receive(
315      id,
316      &msg,
317      &n,
318      RTEMS_WAIT,
319      RTEMS_NO_TIMEOUT
320    );
321    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
322    rtems_test_assert(n == sizeof(msg));
323  }
324
325  ctx->self_msg_ops[active_workers - 1][worker_index] = counter;
326}
327
328static void test_self_msg_fini(
329  rtems_test_parallel_context *base,
330  void *arg,
331  size_t active_workers
332)
333{
334  test_context *ctx = (test_context *) base;
335
336  test_fini(
337    "SelfMsg",
338    &ctx->self_msg_ops[active_workers - 1][0],
339    active_workers
340  );
341}
342
343static void test_many_to_one_msg_body(
344  rtems_test_parallel_context *base,
345  void *arg,
346  size_t active_workers,
347  size_t worker_index
348)
349{
350  test_context *ctx = (test_context *) base;
351  rtems_id id = ctx->mq[0];
352  bool is_master = rtems_test_parallel_is_master_worker(worker_index);
353  uint32_t counter = 0;
354
355  while (!rtems_test_parallel_stop_job(&ctx->base)) {
356    rtems_status_code sc;
357    test_msg msg = { .value = 0 };
358    size_t n;
359
360    ++counter;
361
362    sc = rtems_message_queue_send(id, &msg, sizeof(msg));
363    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_TOO_MANY);
364
365    if (is_master) {
366      n = sizeof(msg);
367      sc = rtems_message_queue_receive(
368        id,
369        &msg,
370        &n,
371        RTEMS_WAIT,
372        RTEMS_NO_TIMEOUT
373      );
374      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
375      rtems_test_assert(n == sizeof(msg));
376    }
377  }
378
379  ctx->many_to_one_msg_ops[active_workers - 1][worker_index] = counter;
380}
381
382static void test_many_to_one_msg_fini(
383  rtems_test_parallel_context *base,
384  void *arg,
385  size_t active_workers
386)
387{
388  test_context *ctx = (test_context *) base;
389
390  test_fini(
391    "ManyToOneMsg",
392    &ctx->many_to_one_msg_ops[active_workers - 1][0],
393    active_workers
394  );
395}
396
397static void test_many_sys_lock_mutex_body(
398  rtems_test_parallel_context *base,
399  void *arg,
400  size_t active_workers,
401  size_t worker_index
402)
403{
404  test_context *ctx = (test_context *) base;
405  struct _Mutex_Control mtx;
406  uint32_t counter = 0;
407
408  _Mutex_Initialize(&mtx);
409
410  while (!rtems_test_parallel_stop_job(&ctx->base)) {
411    ++counter;
412
413    _Mutex_Acquire(&mtx);
414    _Mutex_Release(&mtx);
415  }
416
417  ctx->many_sys_lock_mutex_ops[active_workers - 1][worker_index] = counter;
418}
419
420static void test_many_sys_lock_mutex_fini(
421  rtems_test_parallel_context *base,
422  void *arg,
423  size_t active_workers
424)
425{
426  test_context *ctx = (test_context *) base;
427
428  test_fini(
429    "ManySysLockMutex",
430    &ctx->many_sys_lock_mutex_ops[active_workers - 1][0],
431    active_workers
432  );
433}
434
435static void test_many_classic_ceiling_body(
436  rtems_test_parallel_context *base,
437  void *arg,
438  size_t active_workers,
439  size_t worker_index
440)
441{
442  test_context *ctx = (test_context *) base;
443  rtems_status_code sc;
444  rtems_id id;
445  uint32_t counter = 0;
446
447  sc = rtems_semaphore_create(
448    rtems_build_name('T', 'E', 'S', 'T'),
449    1,
450    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING | RTEMS_PRIORITY,
451    1,
452    &id
453  );
454  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
455
456  while (!rtems_test_parallel_stop_job(&ctx->base)) {
457    rtems_status_code sc;
458
459    ++counter;
460
461    sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
462    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
463
464    sc = rtems_semaphore_release(id);
465    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
466  }
467
468  ctx->many_classic_ceiling_ops[active_workers - 1][worker_index] = counter;
469
470  sc = rtems_semaphore_delete(id);
471  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
472}
473
474static void test_many_classic_ceiling_fini(
475  rtems_test_parallel_context *base,
476  void *arg,
477  size_t active_workers
478)
479{
480  test_context *ctx = (test_context *) base;
481
482  test_fini(
483    "ManyClassicCeilingMutex",
484    &ctx->many_classic_ceiling_ops[active_workers - 1][0],
485    active_workers
486  );
487}
488
489static void test_many_classic_mrsp_body(
490  rtems_test_parallel_context *base,
491  void *arg,
492  size_t active_workers,
493  size_t worker_index
494)
495{
496  test_context *ctx = (test_context *) base;
497  rtems_status_code sc;
498  rtems_id id;
499  uint32_t counter = 0;
500
501  sc = rtems_semaphore_create(
502    rtems_build_name('T', 'E', 'S', 'T'),
503    1,
504    RTEMS_BINARY_SEMAPHORE | RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
505    1,
506    &id
507  );
508  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
509
510  while (!rtems_test_parallel_stop_job(&ctx->base)) {
511    rtems_status_code sc;
512
513    ++counter;
514
515    sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
516    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
517
518    sc = rtems_semaphore_release(id);
519    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
520  }
521
522  ctx->many_classic_mrsp_ops[active_workers - 1][worker_index] = counter;
523
524  sc = rtems_semaphore_delete(id);
525  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
526}
527
528static void test_many_classic_mrsp_fini(
529  rtems_test_parallel_context *base,
530  void *arg,
531  size_t active_workers
532)
533{
534  test_context *ctx = (test_context *) base;
535
536  test_fini(
537    "ManyClassicMrsPMutex",
538    &ctx->many_classic_mrsp_ops[active_workers - 1][0],
539    active_workers
540  );
541}
542
543static void test_many_pthread_spinlock_body(
544  rtems_test_parallel_context *base,
545  void *arg,
546  size_t active_workers,
547  size_t worker_index
548)
549{
550  test_context *ctx = (test_context *) base;
551  int eno;
552  pthread_spinlock_t spin;
553  uint32_t counter = 0;
554
555  eno = pthread_spin_init(&spin, 0);
556  rtems_test_assert(eno == 0);
557
558  while (!rtems_test_parallel_stop_job(&ctx->base)) {
559    ++counter;
560
561    pthread_spin_lock(&spin);
562    pthread_spin_unlock(&spin);
563  }
564
565  ctx->many_pthread_spinlock_ops[active_workers - 1][worker_index] = counter;
566
567  eno = pthread_spin_destroy(&spin);
568  rtems_test_assert(eno == 0);
569}
570
571static void test_many_pthread_spinlock_fini(
572  rtems_test_parallel_context *base,
573  void *arg,
574  size_t active_workers
575)
576{
577  test_context *ctx = (test_context *) base;
578
579  test_fini(
580    "ManyPthreadSpinlock",
581    &ctx->many_pthread_spinlock_ops[active_workers - 1][0],
582    active_workers
583  );
584}
585
586static void test_many_pthread_mutex_inherit_body(
587  rtems_test_parallel_context *base,
588  void *arg,
589  size_t active_workers,
590  size_t worker_index
591)
592{
593  test_context *ctx = (test_context *) base;
594  int eno;
595  pthread_mutexattr_t attr;
596  pthread_mutex_t mtx;
597  uint32_t counter = 0;
598
599  eno = pthread_mutexattr_init(&attr);
600  rtems_test_assert(eno == 0);
601
602  eno = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
603  rtems_test_assert(eno == 0);
604
605  eno = pthread_mutex_init(&mtx, &attr);
606  rtems_test_assert(eno == 0);
607
608  while (!rtems_test_parallel_stop_job(&ctx->base)) {
609    ++counter;
610
611    pthread_mutex_lock(&mtx);
612    pthread_mutex_unlock(&mtx);
613  }
614
615  ctx->many_pthread_mutex_inherit_ops[active_workers - 1][worker_index] =
616    counter;
617
618  eno = pthread_mutex_destroy(&mtx);
619  rtems_test_assert(eno == 0);
620
621  eno = pthread_mutexattr_destroy(&attr);
622  rtems_test_assert(eno == 0);
623}
624
625static void test_many_pthread_mutex_inherit_fini(
626  rtems_test_parallel_context *base,
627  void *arg,
628  size_t active_workers
629)
630{
631  test_context *ctx = (test_context *) base;
632
633  test_fini(
634    "ManyPthreadMutexInherit",
635    &ctx->many_pthread_mutex_inherit_ops[active_workers - 1][0],
636    active_workers
637  );
638}
639
640static void test_many_pthread_mutex_protect_body(
641  rtems_test_parallel_context *base,
642  void *arg,
643  size_t active_workers,
644  size_t worker_index
645)
646{
647  test_context *ctx = (test_context *) base;
648  int eno;
649  pthread_mutexattr_t attr;
650  pthread_mutex_t mtx;
651  uint32_t counter = 0;
652
653  eno = pthread_mutexattr_init(&attr);
654  rtems_test_assert(eno == 0);
655
656  eno = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
657  rtems_test_assert(eno == 0);
658
659  eno = pthread_mutexattr_setprioceiling(
660    &attr,
661    sched_get_priority_max(SCHED_FIFO)
662  );
663  rtems_test_assert(eno == 0);
664
665  eno = pthread_mutex_init(&mtx, &attr);
666  rtems_test_assert(eno == 0);
667
668  while (!rtems_test_parallel_stop_job(&ctx->base)) {
669    ++counter;
670
671    pthread_mutex_lock(&mtx);
672    pthread_mutex_unlock(&mtx);
673  }
674
675  ctx->many_pthread_mutex_protect_ops[active_workers - 1][worker_index] =
676    counter;
677
678  eno = pthread_mutex_destroy(&mtx);
679  rtems_test_assert(eno == 0);
680
681  eno = pthread_mutexattr_destroy(&attr);
682  rtems_test_assert(eno == 0);
683}
684
685static void test_many_pthread_mutex_protect_fini(
686  rtems_test_parallel_context *base,
687  void *arg,
688  size_t active_workers
689)
690{
691  test_context *ctx = (test_context *) base;
692
693  test_fini(
694    "ManyPthreadMutexProtect",
695    &ctx->many_pthread_mutex_protect_ops[active_workers - 1][0],
696    active_workers
697  );
698}
699
/*
 * Table of parallel jobs executed by rtems_test_parallel() in order.  All
 * jobs share test_init() and use cascade = true, i.e. each job is run
 * repeatedly with an increasing number of active workers.
 */
static const rtems_test_parallel_job test_jobs[] = {
  { /* SelfEvent */
    .init = test_init,
    .body = test_self_event_body,
    .fini = test_self_event_fini,
    .cascade = true
  }, { /* AllToOneEvent */
    .init = test_init,
    .body = test_all_to_one_event_body,
    .fini = test_all_to_one_event_fini,
    .cascade = true
  }, { /* OneMutex */
    .init = test_init,
    .body = test_one_mutex_body,
    .fini = test_one_mutex_fini,
    .cascade = true
  }, { /* ManyMutex */
    .init = test_init,
    .body = test_many_mutex_body,
    .fini = test_many_mutex_fini,
    .cascade = true
  }, { /* SelfMsg */
    .init = test_init,
    .body = test_self_msg_body,
    .fini = test_self_msg_fini,
    .cascade = true
  }, { /* ManyToOneMsg */
    .init = test_init,
    .body = test_many_to_one_msg_body,
    .fini = test_many_to_one_msg_fini,
    .cascade = true
  }, { /* ManySysLockMutex */
    .init = test_init,
    .body = test_many_sys_lock_mutex_body,
    .fini = test_many_sys_lock_mutex_fini,
    .cascade = true
  }, { /* ManyClassicCeilingMutex */
    .init = test_init,
    .body = test_many_classic_ceiling_body,
    .fini = test_many_classic_ceiling_fini,
    .cascade = true
  }, { /* ManyClassicMrsPMutex */
    .init = test_init,
    .body = test_many_classic_mrsp_body,
    .fini = test_many_classic_mrsp_fini,
    .cascade = true
  }, { /* ManyPthreadSpinlock */
    .init = test_init,
    .body = test_many_pthread_spinlock_body,
    .fini = test_many_pthread_spinlock_fini,
    .cascade = true
  }, { /* ManyPthreadMutexInherit */
    .init = test_init,
    .body = test_many_pthread_mutex_inherit_body,
    .fini = test_many_pthread_mutex_inherit_fini,
    .cascade = true
  }, { /* ManyPthreadMutexProtect */
    .init = test_init,
    .body = test_many_pthread_mutex_protect_body,
    .fini = test_many_pthread_mutex_protect_fini,
    .cascade = true
  }
};
763
764static void Init(rtems_task_argument arg)
765{
766  test_context *ctx = &test_instance;
767  const char *test = "TestTimeFine01";
768  rtems_status_code sc;
769  size_t i;
770
771  TEST_BEGIN();
772
773  ctx->master = rtems_task_self();
774
775  sc = rtems_semaphore_create(
776    rtems_build_name('T', 'E', 'S', 'T'),
777    1,
778    RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY | RTEMS_PRIORITY,
779    0,
780    &ctx->sema
781  );
782  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
783
784  for (i = 0; i < CPU_COUNT; ++i) {
785    sc = rtems_message_queue_create(
786      rtems_build_name('T', 'E', 'S', 'T'),
787      MSG_COUNT,
788      sizeof(test_msg),
789      RTEMS_DEFAULT_ATTRIBUTES,
790      &ctx->mq[i]
791    );
792    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
793  }
794
795  printf("<%s>\n", test);
796
797  rtems_test_parallel(
798    &ctx->base,
799    NULL,
800    &test_jobs[0],
801    RTEMS_ARRAY_SIZE(test_jobs)
802  );
803
804  printf("</%s>\n", test);
805
806  TEST_END();
807  rtems_test_exit(0);
808}
809
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER

/* NOTE(review): presumably one task per processor for the parallel workers
 * plus the Init task reused as master — confirm against the
 * rtems_test_parallel() implementation. */
#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT

/* NOTE(review): presumably the stop timer of the parallel framework —
 * confirm. */
#define CONFIGURE_MAXIMUM_TIMERS 1

/* One shared mutex created in Init() plus up to one private semaphore per
 * worker created concurrently by the many-* job bodies. */
#define CONFIGURE_MAXIMUM_SEMAPHORES (1 + CPU_COUNT)

/* Init() creates one message queue per potential worker. */
#define CONFIGURE_MAXIMUM_MESSAGE_QUEUES CPU_COUNT

/* NOTE(review): buffer memory is sized for a single queue of MSG_COUNT
 * messages although CPU_COUNT queues are created — confirm this is
 * sufficient for the SMP configuration. */
#define CONFIGURE_MESSAGE_BUFFER_MEMORY \
  CONFIGURE_MESSAGE_BUFFERS_FOR_QUEUE(MSG_COUNT, sizeof(test_msg))

#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT_TASK_PRIORITY 2

#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_INIT

#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.