source: rtems/testsuites/smptests/smpmrsp01/init.c @ 2c14e67

Last change on this file was 2c14e67, checked in by Sebastian Huber <sebastian.huber@…> on 06/02/15 at 12:22:03

smptests/smpmrsp01: Reduce required CPU count

[8fcafdd5]1/*
[be0366b]2 * Copyright (c) 2014-2015 embedded brains GmbH.  All rights reserved.
[8fcafdd5]3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#ifdef HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
[2c14e67]19#include <sys/param.h>
20
[8fcafdd5]21#include <stdio.h>
22#include <inttypes.h>
23
24#include <rtems.h>
25#include <rtems/libcsupport.h>
[9f228bea]26#include <rtems/score/schedulersmpimpl.h>
[8fcafdd5]27#include <rtems/score/smpbarrier.h>
[9f228bea]28#include <rtems/score/smplock.h>
[8fcafdd5]29
30#define TESTS_USE_PRINTK
31#include "tmacros.h"
32
33const char rtems_test_name[] = "SMPMRSP 1";
34
35#define CPU_COUNT 32
36
37#define MRSP_COUNT 32
38
[9f228bea]39#define SWITCH_EVENT_COUNT 32
40
[8fcafdd5]41typedef struct {
42  uint32_t sleep;
43  uint32_t timeout;
44  uint32_t obtain[MRSP_COUNT];
[9f228bea]45  uint32_t cpu[CPU_COUNT];
[8fcafdd5]46} counter;
47
[9f228bea]48typedef struct {
49  uint32_t cpu_index;
50  const Thread_Control *executing;
51  const Thread_Control *heir;
52  const Thread_Control *heir_node;
53  Priority_Control heir_priority;
54} switch_event;
55
[8fcafdd5]56typedef struct {
57  rtems_id main_task_id;
[9f228bea]58  rtems_id migration_task_id;
[5bd822a7]59  rtems_id low_task_id[2];
60  rtems_id high_task_id[2];
[cceb19f4]61  rtems_id timer_id;
[8fcafdd5]62  rtems_id counting_sem_id;
63  rtems_id mrsp_ids[MRSP_COUNT];
64  rtems_id scheduler_ids[CPU_COUNT];
65  rtems_id worker_ids[2 * CPU_COUNT];
66  volatile bool stop_worker[CPU_COUNT];
67  counter counters[2 * CPU_COUNT];
[9f228bea]68  uint32_t migration_counters[CPU_COUNT];
[8fcafdd5]69  Thread_Control *worker_task;
70  SMP_barrier_Control barrier;
[9f228bea]71  SMP_lock_Control switch_lock;
72  size_t switch_index;
73  switch_event switch_events[32];
[5bd822a7]74  volatile bool high_run[2];
75  volatile bool low_run[2];
[8fcafdd5]76} test_context;
77
78static test_context test_instance = {
[9f228bea]79  .switch_lock = SMP_LOCK_INITIALIZER("test instance switch lock")
[8fcafdd5]80};
81
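/* Spin for roughly two clock ticks without blocking */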
[0ff1c29]82static void busy_wait(void)
83{
84  rtems_interval later = rtems_clock_tick_later(2);
85
86  while (rtems_clock_tick_before(later)) {
87    /* Wait */
88  }
89}
90
[be0366b]91static void barrier_init(test_context *ctx)
92{
93  _SMP_barrier_Control_initialize(&ctx->barrier);
94}
95
[8fcafdd5]96static void barrier(test_context *ctx, SMP_barrier_State *bs)
97{
98  _SMP_barrier_Wait(&ctx->barrier, bs, 2);
99}
100
[0ff1c29]101static void barrier_and_delay(test_context *ctx, SMP_barrier_State *bs)
102{
103  barrier(ctx, bs);
104  busy_wait();
105}
106
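/* Query the current priority of a task without changing it */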
[9f228bea]107static rtems_task_priority get_prio(rtems_id task_id)
[8fcafdd5]108{
109  rtems_status_code sc;
110  rtems_task_priority prio;
111
112  sc = rtems_task_set_priority(task_id, RTEMS_CURRENT_PRIORITY, &prio);
113  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
[9f228bea]114
115  return prio;
116}
117
118static void wait_for_prio(rtems_id task_id, rtems_task_priority prio)
119{
120  while (get_prio(task_id) != prio) {
121    /* Wait */
122  }
123}
124
125static void assert_prio(rtems_id task_id, rtems_task_priority expected_prio)
126{
127  rtems_test_assert(get_prio(task_id) == expected_prio);
[8fcafdd5]128}
129
130static void change_prio(rtems_id task_id, rtems_task_priority prio)
131{
132  rtems_status_code sc;
133
134  sc = rtems_task_set_priority(task_id, prio, &prio);
135  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
136}
137
138static void assert_executing_worker(test_context *ctx)
139{
140  rtems_test_assert(
141    _CPU_Context_Get_is_executing(&ctx->worker_task->Registers)
142  );
143}
144
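/*
 * Thread switch extension: records each context switch (CPU index, executing
 * and heir threads, heir node owner and priority) in a bounded event log
 * protected by an SMP lock.
 */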
[9f228bea]145static void switch_extension(Thread_Control *executing, Thread_Control *heir)
146{
147  test_context *ctx = &test_instance;
148  SMP_lock_Context lock_context;
149  size_t i;
150
151  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
152
153  i = ctx->switch_index;
154  if (i < SWITCH_EVENT_COUNT) {
155    switch_event *e = &ctx->switch_events[i];
156    Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node(heir);
157
158    e->cpu_index = rtems_get_current_processor();
159    e->executing = executing;
160    e->heir = heir;
161    e->heir_node = _Scheduler_Node_get_owner(&node->Base);
162    e->heir_priority = node->priority;
163
164    ctx->switch_index = i + 1;
165  }
166
167  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
168}
169
170static void reset_switch_events(test_context *ctx)
171{
172  SMP_lock_Context lock_context;
173
174  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
175  ctx->switch_index = 0;
176  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
177}
178
179static size_t get_switch_events(test_context *ctx)
180{
181  SMP_lock_Context lock_context;
182  size_t events;
183
184  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
185  events = ctx->switch_index;
186  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
187
188  return events;
189}
190
191static void print_switch_events(test_context *ctx)
192{
193  size_t n = get_switch_events(ctx);
194  size_t i;
195
196  for (i = 0; i < n; ++i) {
197    switch_event *e = &ctx->switch_events[i];
198    char ex[5];
199    char hr[5];
200    char hn[5];
201
202    rtems_object_get_name(e->executing->Object.id, sizeof(ex), &ex[0]);
203    rtems_object_get_name(e->heir->Object.id, sizeof(hr), &hr[0]);
204    rtems_object_get_name(e->heir_node->Object.id, sizeof(hn), &hn[0]);
205
206    printf(
207      "[%" PRIu32 "] %4s -> %4s (prio %3" PRIu32 ", node %4s)\n",
208      e->cpu_index,
209      &ex[0],
210      &hr[0],
211      e->heir_priority,
212      &hn[0]
213    );
214  }
215}
216
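/* Set the run flag passed as argument and busy loop until deleted */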
[864d3475]217static void run_task(rtems_task_argument arg)
218{
219  volatile bool *run = (volatile bool *) arg;
220
[ad0743db]221  *run = true;
222
[864d3475]223  while (true) {
[ad0743db]224    /* Do nothing */
[864d3475]225  }
226}
227
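/*
 * Worker for test_mrsp_obtain_and_release(): synchronizes with the main task
 * via the barrier states (A) to (E) and (H) and exercises obtains with
 * timeout, priority changes and helping.
 */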
[8fcafdd5]228static void obtain_and_release_worker(rtems_task_argument arg)
229{
230  test_context *ctx = &test_instance;
231  rtems_status_code sc;
232  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
233
234  ctx->worker_task = _Thread_Get_executing();
235
[864d3475]236  assert_prio(RTEMS_SELF, 4);
[8fcafdd5]237
238  /* Obtain with timeout (A) */
239  barrier(ctx, &barrier_state);
240
241  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
242  rtems_test_assert(sc == RTEMS_TIMEOUT);
243
[864d3475]244  assert_prio(RTEMS_SELF, 4);
[8fcafdd5]245
246  /* Obtain with priority change and timeout (B) */
247  barrier(ctx, &barrier_state);
248
249  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
250  rtems_test_assert(sc == RTEMS_TIMEOUT);
251
[864d3475]252  assert_prio(RTEMS_SELF, 2);
[8fcafdd5]253
254  /* Restore priority (C) */
255  barrier(ctx, &barrier_state);
256
257  /* Obtain without timeout (D) */
258  barrier(ctx, &barrier_state);
259
260  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
261  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
262
[864d3475]263  assert_prio(RTEMS_SELF, 3);
[8fcafdd5]264
265  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
266  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
267
[864d3475]268  assert_prio(RTEMS_SELF, 4);
269
270  /* Obtain and help with timeout (E) */
271  barrier(ctx, &barrier_state);
[8fcafdd5]272
[864d3475]273  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
274  rtems_test_assert(sc == RTEMS_TIMEOUT);
275
276  assert_prio(RTEMS_SELF, 4);
277
278  sc = rtems_task_suspend(ctx->high_task_id[0]);
279  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
280
281  /* Worker done (H) */
[8fcafdd5]282  barrier(ctx, &barrier_state);
283
[77c5ddd4]284  while (true) {
285    /* Wait for termination */
286  }
[8fcafdd5]287}
288
[9f228bea]289static void test_mrsp_obtain_and_release(test_context *ctx)
[8fcafdd5]290{
291  rtems_status_code sc;
292  rtems_task_priority prio;
293  rtems_id scheduler_id;
294  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
295
296  puts("test MrsP obtain and release");
297
[864d3475]298  change_prio(RTEMS_SELF, 3);
299
[be0366b]300  barrier_init(ctx);
[864d3475]301  reset_switch_events(ctx);
302
303  ctx->high_run[0] = false;
304
305  sc = rtems_task_create(
306    rtems_build_name('H', 'I', 'G', '0'),
307    1,
308    RTEMS_MINIMUM_STACK_SIZE,
309    RTEMS_DEFAULT_MODES,
310    RTEMS_DEFAULT_ATTRIBUTES,
311    &ctx->high_task_id[0]
312  );
313  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
[9f228bea]314
[8fcafdd5]315  /* Check executing task parameters */
316
317  sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
318  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
319
320  rtems_test_assert(ctx->scheduler_ids[0] == scheduler_id);
321
322  /* Create a MrsP semaphore object and lock it */
323
324  sc = rtems_semaphore_create(
325    rtems_build_name('M', 'R', 'S', 'P'),
326    1,
327    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
328      | RTEMS_BINARY_SEMAPHORE,
[864d3475]329    2,
[8fcafdd5]330    &ctx->mrsp_ids[0]
331  );
332  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
333
[864d3475]334  assert_prio(RTEMS_SELF, 3);
[8fcafdd5]335
336  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
337  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
338
[864d3475]339  assert_prio(RTEMS_SELF, 2);
[8fcafdd5]340
341  /*
342   * The ceiling priority values per scheduler are equal to the value specified
343   * for object creation.
344   */
345
346  prio = RTEMS_CURRENT_PRIORITY;
347  sc = rtems_semaphore_set_priority(
348    ctx->mrsp_ids[0],
349    ctx->scheduler_ids[0],
350    prio,
351    &prio
352  );
353  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
[864d3475]354  rtems_test_assert(prio == 2);
[8fcafdd5]355
356  /* Check the old value and set a new ceiling priority for scheduler B */
357
[864d3475]358  prio = 3;
[8fcafdd5]359  sc = rtems_semaphore_set_priority(
360    ctx->mrsp_ids[0],
361    ctx->scheduler_ids[1],
362    prio,
363    &prio
364  );
365  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
[864d3475]366  rtems_test_assert(prio == 2);
[8fcafdd5]367
368  /* Check the ceiling priority values */
369
370  prio = RTEMS_CURRENT_PRIORITY;
371  sc = rtems_semaphore_set_priority(
372    ctx->mrsp_ids[0],
373    ctx->scheduler_ids[0],
374    prio,
375    &prio
376  );
377  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
[864d3475]378  rtems_test_assert(prio == 2);
[8fcafdd5]379
380  prio = RTEMS_CURRENT_PRIORITY;
381  sc = rtems_semaphore_set_priority(
382    ctx->mrsp_ids[0],
383    ctx->scheduler_ids[1],
384    prio,
385    &prio
386  );
387  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
[864d3475]388  rtems_test_assert(prio == 3);
[8fcafdd5]389
390  /* Check that a thread waiting to get ownership remains executing */
391
392  sc = rtems_task_create(
393    rtems_build_name('W', 'O', 'R', 'K'),
[864d3475]394    4,
[8fcafdd5]395    RTEMS_MINIMUM_STACK_SIZE,
396    RTEMS_DEFAULT_MODES,
397    RTEMS_DEFAULT_ATTRIBUTES,
398    &ctx->worker_ids[0]
399  );
400  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
401
402  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
403  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
404
405  sc = rtems_task_start(ctx->worker_ids[0], obtain_and_release_worker, 0);
406  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
407
408  /* Obtain with timeout (A) */
[0ff1c29]409  barrier_and_delay(ctx, &barrier_state);
[8fcafdd5]410
[864d3475]411  assert_prio(ctx->worker_ids[0], 3);
[8fcafdd5]412  assert_executing_worker(ctx);
413
414  /* Obtain with priority change and timeout (B) */
[0ff1c29]415  barrier_and_delay(ctx, &barrier_state);
[8fcafdd5]416
[864d3475]417  assert_prio(ctx->worker_ids[0], 3);
418  change_prio(ctx->worker_ids[0], 2);
[8fcafdd5]419  assert_executing_worker(ctx);
420
421  /* Restore priority (C) */
422  barrier(ctx, &barrier_state);
423
[864d3475]424  assert_prio(ctx->worker_ids[0], 2);
425  change_prio(ctx->worker_ids[0], 4);
[8fcafdd5]426
427  /* Obtain without timeout (D) */
[0ff1c29]428  barrier_and_delay(ctx, &barrier_state);
[8fcafdd5]429
[864d3475]430  assert_prio(ctx->worker_ids[0], 3);
[8fcafdd5]431  assert_executing_worker(ctx);
432
433  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
434  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
435
[864d3475]436  /* Check that a timeout works in case the waiting thread actually helps */
437
438  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
439  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
440
441  /* Obtain and help with timeout (E) */
442  barrier_and_delay(ctx, &barrier_state);
443
444  sc = rtems_task_start(
445    ctx->high_task_id[0],
446    run_task,
447    (rtems_task_argument) &ctx->high_run[0]
448  );
449  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
450
451  rtems_test_assert(rtems_get_current_processor() == 1);
452
453  while (rtems_get_current_processor() != 0) {
454    /* Wait */
455  }
456
457  rtems_test_assert(ctx->high_run[0]);
458
459  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
460  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
461
462  print_switch_events(ctx);
463
464  /* Worker done (H) */
[8fcafdd5]465  barrier(ctx, &barrier_state);
466
467  sc = rtems_task_delete(ctx->worker_ids[0]);
468  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
469
[864d3475]470  sc = rtems_task_delete(ctx->high_task_id[0]);
471  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
472
[8fcafdd5]473  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
474  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
475}
476
[be0366b]477static void obtain_after_migration_worker(rtems_task_argument arg)
478{
479  test_context *ctx = &test_instance;
480  rtems_status_code sc;
481  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
482
483  assert_prio(RTEMS_SELF, 3);
484
485  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
486  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
487
488  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
489  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
490
491  /* Worker done (K) */
492  barrier(ctx, &barrier_state);
493
494  while (true) {
495    /* Wait for termination */
496  }
497}
498
499static void obtain_after_migration_high(rtems_task_argument arg)
500{
501  test_context *ctx = &test_instance;
502  rtems_status_code sc;
503  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
504
505  assert_prio(RTEMS_SELF, 2);
506
507  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
508  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
509
510  /* Obtain done (I) */
511  barrier(ctx, &barrier_state);
512
513  /* Ready to release (J) */
514  barrier(ctx, &barrier_state);
515
516  sc = rtems_semaphore_release(ctx->mrsp_ids[1]);
517  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
518
519  rtems_task_suspend(RTEMS_SELF);
520  rtems_test_assert(0);
521}
522
523static void test_mrsp_obtain_after_migration(test_context *ctx)
524{
525  rtems_status_code sc;
526  rtems_task_priority prio;
527  rtems_id scheduler_id;
528  SMP_barrier_State barrier_state;
529
530  puts("test MrsP obtain after migration");
531
532  change_prio(RTEMS_SELF, 3);
533
534  barrier_init(ctx);
535  reset_switch_events(ctx);
536
537  /* Create tasks */
538
539  sc = rtems_task_create(
540    rtems_build_name('H', 'I', 'G', '0'),
541    2,
542    RTEMS_MINIMUM_STACK_SIZE,
543    RTEMS_DEFAULT_MODES,
544    RTEMS_DEFAULT_ATTRIBUTES,
545    &ctx->high_task_id[0]
546  );
547  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
548
549  sc = rtems_task_create(
550    rtems_build_name('W', 'O', 'R', 'K'),
551    3,
552    RTEMS_MINIMUM_STACK_SIZE,
553    RTEMS_DEFAULT_MODES,
554    RTEMS_DEFAULT_ATTRIBUTES,
555    &ctx->worker_ids[0]
556  );
557  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
558
559  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
560  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
561
562  /* Create MrsP semaphore objects */
563
564  sc = rtems_semaphore_create(
565    rtems_build_name('M', 'R', 'S', 'P'),
566    1,
567    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
568      | RTEMS_BINARY_SEMAPHORE,
569    3,
570    &ctx->mrsp_ids[0]
571  );
572  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
573
574  sc = rtems_semaphore_create(
575    rtems_build_name('M', 'R', 'S', 'P'),
576    1,
577    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
578      | RTEMS_BINARY_SEMAPHORE,
579    2,
580    &ctx->mrsp_ids[1]
581  );
582  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
583
584  sc = rtems_semaphore_create(
585    rtems_build_name('M', 'R', 'S', 'P'),
586    1,
587    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
588      | RTEMS_BINARY_SEMAPHORE,
589    1,
590    &ctx->mrsp_ids[2]
591  );
592  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
593
594  prio = 4;
595  sc = rtems_semaphore_set_priority(
596    ctx->mrsp_ids[2],
597    ctx->scheduler_ids[1],
598    prio,
599    &prio
600  );
601  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
602  rtems_test_assert(prio == 1);
603
604  /* Check executing task parameters */
605
606  sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
607  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
608
609  rtems_test_assert(ctx->scheduler_ids[0] == scheduler_id);
610
611  assert_prio(RTEMS_SELF, 3);
612
613  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
614  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
615
616  assert_prio(RTEMS_SELF, 3);
617
618  /* Start other tasks */
619
620  sc = rtems_task_start(ctx->worker_ids[0], obtain_after_migration_worker, 0);
621  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
622
623  sc = rtems_task_start(ctx->high_task_id[0], obtain_after_migration_high, 0);
624  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
625
626  rtems_test_assert(rtems_get_current_processor() == 1);
627
628  /* Obtain done (I) */
629  _SMP_barrier_State_initialize(&barrier_state);
630  barrier(ctx, &barrier_state);
631
632  sc = rtems_task_suspend(ctx->high_task_id[0]);
633  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
634
635  rtems_test_assert(rtems_get_current_processor() == 1);
636
637  /*
638   * Obtain the second MrsP semaphore and ensure that we change the priority of
639   * own scheduler node and not the one we are currently using.
640   */
641
642  sc = rtems_semaphore_obtain(ctx->mrsp_ids[2], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
643  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
644
645  assert_prio(RTEMS_SELF, 1);
646
647  rtems_test_assert(rtems_get_current_processor() == 1);
648
649  sc = rtems_semaphore_release(ctx->mrsp_ids[2]);
650  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
651
652  sc = rtems_task_resume(ctx->high_task_id[0]);
653  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
654
655  /* Ready to release (J) */
656  barrier(ctx, &barrier_state);
657
658  rtems_test_assert(rtems_get_current_processor() == 1);
659
660  /* Prepare barrier for worker */
661  barrier_init(ctx);
662  _SMP_barrier_State_initialize(&barrier_state);
663
664  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
665  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
666
667  rtems_test_assert(rtems_get_current_processor() == 0);
668
669  print_switch_events(ctx);
670
671  /* Worker done (K) */
672  barrier(ctx, &barrier_state);
673
674  sc = rtems_task_delete(ctx->worker_ids[0]);
675  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
676
677  sc = rtems_task_delete(ctx->high_task_id[0]);
678  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
679
680  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
681  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
682
683  sc = rtems_semaphore_delete(ctx->mrsp_ids[1]);
684  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
685
686  sc = rtems_semaphore_delete(ctx->mrsp_ids[2]);
687  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
688}
689
[8fcafdd5]690static void test_mrsp_flush_error(void)
691{
692  rtems_status_code sc;
693  rtems_id id;
694
695  puts("test MrsP flush error");
696
697  sc = rtems_semaphore_create(
698    rtems_build_name('M', 'R', 'S', 'P'),
699    1,
700    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
701      | RTEMS_BINARY_SEMAPHORE,
702    1,
703    &id
704  );
705  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
706
707  sc = rtems_semaphore_flush(id);
708  rtems_test_assert(sc == RTEMS_NOT_DEFINED);
709
710  sc = rtems_semaphore_delete(id);
711  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
712}
713
714static void test_mrsp_initially_locked_error(void)
715{
716  rtems_status_code sc;
717  rtems_id id;
718
719  puts("test MrsP initially locked error");
720
721  sc = rtems_semaphore_create(
722    rtems_build_name('M', 'R', 'S', 'P'),
723    0,
724    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
725      | RTEMS_BINARY_SEMAPHORE,
726    1,
727    &id
728  );
729  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
730}
731
732static void test_mrsp_nested_obtain_error(void)
733{
734  rtems_status_code sc;
735  rtems_id id;
736
737  puts("test MrsP nested obtain error");
738
739  sc = rtems_semaphore_create(
740    rtems_build_name('M', 'R', 'S', 'P'),
741    1,
742    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
743      | RTEMS_BINARY_SEMAPHORE,
744    1,
745    &id
746  );
747  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
748
749  sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
750  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
751
752  sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
753  rtems_test_assert(sc == RTEMS_UNSATISFIED);
754
755  sc = rtems_semaphore_release(id);
756  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
757
758  sc = rtems_semaphore_delete(id);
759  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
760}
761
[9553e7a6]762static void test_mrsp_unlock_order_error(void)
763{
764  rtems_status_code sc;
765  rtems_id id_a;
766  rtems_id id_b;
767
768  puts("test MrsP unlock order error");
769
770  sc = rtems_semaphore_create(
771    rtems_build_name(' ', ' ', ' ', 'A'),
772    1,
773    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
774      | RTEMS_BINARY_SEMAPHORE,
775    1,
776    &id_a
777  );
778  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
779
780  sc = rtems_semaphore_create(
781    rtems_build_name(' ', ' ', ' ', 'B'),
782    1,
783    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
784      | RTEMS_BINARY_SEMAPHORE,
785    1,
786    &id_b
787  );
788  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
789
790  sc = rtems_semaphore_obtain(id_a, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
791  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
792
793  sc = rtems_semaphore_obtain(id_b, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
794  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
795
796  sc = rtems_semaphore_release(id_a);
797  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
798
799  sc = rtems_semaphore_release(id_b);
800  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
801
802  sc = rtems_semaphore_release(id_a);
803  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
804
805  sc = rtems_semaphore_delete(id_a);
806  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
807
808  sc = rtems_semaphore_delete(id_b);
809  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
810}
811
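/*
 * Obtains the semaphores in the reverse order of the main task so that the
 * main task's second obtain is detected as a deadlock.
 */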
812static void deadlock_worker(rtems_task_argument arg)
813{
814  test_context *ctx = &test_instance;
815  rtems_status_code sc;
816
817  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
818  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
819
820  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
821  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
822
823  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
824  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
825
826  sc = rtems_semaphore_release(ctx->mrsp_ids[1]);
827  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
828
829  sc = rtems_event_transient_send(ctx->main_task_id);
830  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
831
832  rtems_task_suspend(RTEMS_SELF);
833  rtems_test_assert(0);
834}
835
[9f228bea]836static void test_mrsp_deadlock_error(test_context *ctx)
[9553e7a6]837{
838  rtems_status_code sc;
839  rtems_task_priority prio = 2;
840
841  puts("test MrsP deadlock error");
842
[9f228bea]843  change_prio(RTEMS_SELF, prio);
[9553e7a6]844
845  sc = rtems_semaphore_create(
846    rtems_build_name(' ', ' ', ' ', 'A'),
847    1,
848    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
849      | RTEMS_BINARY_SEMAPHORE,
850    prio,
851    &ctx->mrsp_ids[0]
852  );
853  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
854
855  sc = rtems_semaphore_create(
856    rtems_build_name(' ', ' ', ' ', 'B'),
857    1,
858    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
859      | RTEMS_BINARY_SEMAPHORE,
860    prio,
861    &ctx->mrsp_ids[1]
862  );
863  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
864
865  sc = rtems_task_create(
866    rtems_build_name('W', 'O', 'R', 'K'),
867    prio,
868    RTEMS_MINIMUM_STACK_SIZE,
869    RTEMS_DEFAULT_MODES,
870    RTEMS_DEFAULT_ATTRIBUTES,
871    &ctx->worker_ids[0]
872  );
873  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
874
875  sc = rtems_task_start(ctx->worker_ids[0], deadlock_worker, 0);
876  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
877
878  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
879  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
880
881  sc = rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
882  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
883
884  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
885  rtems_test_assert(sc == RTEMS_UNSATISFIED);
886
887  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
888  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
889
890  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
891  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
892
893  sc = rtems_task_delete(ctx->worker_ids[0]);
894  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
895
896  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
897  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
898
899  sc = rtems_semaphore_delete(ctx->mrsp_ids[1]);
900  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
901}
902
903static void test_mrsp_multiple_obtain(void)
904{
905  rtems_status_code sc;
906  rtems_id sem_a_id;
907  rtems_id sem_b_id;
908  rtems_id sem_c_id;
909
910  puts("test MrsP multiple obtain");
911
912  change_prio(RTEMS_SELF, 4);
913
914  sc = rtems_semaphore_create(
915    rtems_build_name(' ', ' ', ' ', 'A'),
916    1,
917    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
918      | RTEMS_BINARY_SEMAPHORE,
919    3,
920    &sem_a_id
921  );
922  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
923
924  sc = rtems_semaphore_create(
925    rtems_build_name(' ', ' ', ' ', 'B'),
926    1,
927    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
928      | RTEMS_BINARY_SEMAPHORE,
929    2,
930    &sem_b_id
931  );
932  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
933
934  sc = rtems_semaphore_create(
935    rtems_build_name(' ', ' ', ' ', 'C'),
936    1,
937    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
938      | RTEMS_BINARY_SEMAPHORE,
939    1,
940    &sem_c_id
941  );
942  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
943
944  assert_prio(RTEMS_SELF, 4);
945
946  sc = rtems_semaphore_obtain(sem_a_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
947  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
948
949  assert_prio(RTEMS_SELF, 3);
950
951  sc = rtems_semaphore_obtain(sem_b_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
952  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
953
954  assert_prio(RTEMS_SELF, 2);
955
956  sc = rtems_semaphore_obtain(sem_c_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
957  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
958
959  assert_prio(RTEMS_SELF, 1);
960
961  sc = rtems_semaphore_release(sem_c_id);
962  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
963
964  assert_prio(RTEMS_SELF, 2);
965
966  sc = rtems_semaphore_release(sem_b_id);
967  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
968
969  assert_prio(RTEMS_SELF, 3);
970
971  sc = rtems_semaphore_release(sem_a_id);
972  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
973
974  assert_prio(RTEMS_SELF, 4);
975
976  sc = rtems_semaphore_obtain(sem_a_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
977  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
978
979  assert_prio(RTEMS_SELF, 3);
980
981  sc = rtems_semaphore_obtain(sem_b_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
982  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
983
984  assert_prio(RTEMS_SELF, 2);
985
986  sc = rtems_semaphore_obtain(sem_c_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
987  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
988
989  assert_prio(RTEMS_SELF, 1);
990  change_prio(RTEMS_SELF, 3);
991  assert_prio(RTEMS_SELF, 1);
992
993  sc = rtems_semaphore_release(sem_c_id);
994  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
995
996  assert_prio(RTEMS_SELF, 2);
997
998  sc = rtems_semaphore_release(sem_b_id);
999  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1000
1001  assert_prio(RTEMS_SELF, 3);
1002
1003  sc = rtems_semaphore_release(sem_a_id);
1004  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1005
1006  assert_prio(RTEMS_SELF, 3);
1007
1008  sc = rtems_semaphore_delete(sem_a_id);
1009  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1010
1011  sc = rtems_semaphore_delete(sem_b_id);
1012  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1013
1014  sc = rtems_semaphore_delete(sem_c_id);
1015  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1016}
1017
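/*
 * Worker for the block and unblock test: obtains and releases the MrsP
 * semaphore between the barrier states (F) and (G), then idles.
 */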
[cceb19f4]1018static void ready_unlock_worker(rtems_task_argument arg)
1019{
1020  test_context *ctx = &test_instance;
1021  rtems_status_code sc;
1022  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
1023
1024  assert_prio(RTEMS_SELF, 4);
1025
1026  /* Obtain (F) */
1027  barrier(ctx, &barrier_state);
1028
1029  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
1030  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1031
1032  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
1033  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1034
1035  assert_prio(RTEMS_SELF, 4);
1036
1037  /* Done (G) */
1038  barrier(ctx, &barrier_state);
1039
[5bd822a7]1040  while (true) {
1041    /* Do nothing */
1042  }
[cceb19f4]1043}
1044
1045static void unblock_ready_timer(rtems_id timer_id, void *arg)
1046{
1047  test_context *ctx = arg;
1048  rtems_status_code sc;
1049
1050  sc = rtems_task_start(
[5bd822a7]1051    ctx->high_task_id[0],
[cceb19f4]1052    run_task,
[5bd822a7]1053    (rtems_task_argument) &ctx->high_run[0]
[cceb19f4]1054  );
1055  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1056
[5bd822a7]1057  sc = rtems_task_suspend(ctx->high_task_id[0]);
[cceb19f4]1058  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1059
[5bd822a7]1060  sc = rtems_task_resume(ctx->high_task_id[0]);
[cceb19f4]1061  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1062
1063  /*
1064   * At this point the scheduler node of the main thread is in the
1065   * SCHEDULER_SMP_NODE_READY state and a _Scheduler_SMP_Unblock() operation is
1066   * performed.
1067   */
1068  sc = rtems_event_transient_send(ctx->main_task_id);
1069  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1070
[5bd822a7]1071  sc = rtems_task_suspend(ctx->high_task_id[0]);
[cceb19f4]1072  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1073}
1074
1075static void unblock_ready_owner(test_context *ctx)
1076{
1077  rtems_status_code sc;
1078
1079  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
1080  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1081
1082  assert_prio(RTEMS_SELF, 3);
1083
1084  sc = rtems_timer_fire_after(ctx->timer_id, 2, unblock_ready_timer, ctx);
1085  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1086
1087  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
1088  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1089
[5bd822a7]1090  rtems_test_assert(!ctx->high_run[0]);
[cceb19f4]1091}
1092
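/*
 * The following two timer routines suspend the high priority tasks in
 * opposite orders, unblocking the semaphore owner before respectively after
 * the active rival.
 */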
[5bd822a7]1093static void unblock_owner_before_rival_timer(rtems_id timer_id, void *arg)
[cceb19f4]1094{
[5bd822a7]1095  test_context *ctx = arg;
[cceb19f4]1096  rtems_status_code sc;
1097
[5bd822a7]1098  sc = rtems_task_suspend(ctx->high_task_id[0]);
1099  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1100
1101  sc = rtems_task_suspend(ctx->high_task_id[1]);
[cceb19f4]1102  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
[5bd822a7]1103}
1104
1105static void unblock_owner_after_rival_timer(rtems_id timer_id, void *arg)
1106{
1107  test_context *ctx = arg;
1108  rtems_status_code sc;
1109
1110  sc = rtems_task_suspend(ctx->high_task_id[1]);
1111  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1112
1113  sc = rtems_task_suspend(ctx->high_task_id[0]);
1114  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1115}
1116
1117static void various_block_unblock(test_context *ctx)
1118{
1119  rtems_status_code sc;
1120  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
[cceb19f4]1121
1122  /* Worker obtain (F) */
[0ff1c29]1123  barrier_and_delay(ctx, &barrier_state);
[cceb19f4]1124
1125  sc = rtems_task_suspend(ctx->worker_ids[0]);
1126  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1127
[0ff1c29]1128  busy_wait();
[cceb19f4]1129
[5bd822a7]1130  sc = rtems_task_start(
1131    ctx->high_task_id[1],
1132    run_task,
1133    (rtems_task_argument) &ctx->high_run[1]
1134  );
[cceb19f4]1135  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1136
[5bd822a7]1137  while (!ctx->high_run[1]) {
[cceb19f4]1138    /* Do nothing */
1139  }
1140
1141  sc = rtems_task_resume(ctx->worker_ids[0]);
1142  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1143
[5bd822a7]1144  /* Try to schedule a blocked active rival */
1145
1146  sc = rtems_task_suspend(ctx->worker_ids[0]);
1147  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1148
1149  sc = rtems_task_suspend(ctx->high_task_id[1]);
1150  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1151
1152  sc = rtems_task_resume(ctx->high_task_id[1]);
1153  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1154
1155  sc = rtems_task_resume(ctx->worker_ids[0]);
1156  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1157
1158  rtems_test_assert(rtems_get_current_processor() == 0);
1159
1160  /* Use node of the active rival */
1161
1162  sc = rtems_task_suspend(ctx->high_task_id[1]);
1163  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1164
1165  sc = rtems_task_resume(ctx->high_task_id[0]);
1166  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1167
1168  rtems_test_assert(rtems_get_current_processor() == 1);
1169
1170  sc = rtems_task_suspend(ctx->worker_ids[0]);
1171  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1172
1173  sc = rtems_task_resume(ctx->worker_ids[0]);
1174  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1175
1176  /*
1177   * Try to schedule an active rival with an already scheduled active owner
1178   * user.
1179   */
1180
1181  sc = rtems_timer_fire_after(
1182    ctx->timer_id,
1183    2,
1184    unblock_owner_before_rival_timer,
1185    ctx
1186  );
1187  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1188
1189  /* This will take the processor away from us; the timer will help later */
1190  sc = rtems_task_resume(ctx->high_task_id[1]);
1191  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1192
1193  /*
1194   * Try to schedule an active owner with an already scheduled active rival
1195   * user.
1196   */
1197
1198  sc = rtems_task_resume(ctx->high_task_id[0]);
1199  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1200
1201  sc = rtems_timer_fire_after(
1202    ctx->timer_id,
1203    2,
1204    unblock_owner_after_rival_timer,
1205    ctx
1206  );
1207  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1208
1209  /* This will take the processor away from us; the timer will help later */
1210  sc = rtems_task_resume(ctx->high_task_id[1]);
[cceb19f4]1211  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1212
1213  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
1214  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1215
[5bd822a7]1216  rtems_test_assert(rtems_get_current_processor() == 0);
1217
[cceb19f4]1218  assert_prio(RTEMS_SELF, 4);
1219
[5bd822a7]1220  /* Worker done (G) */
[cceb19f4]1221  barrier(ctx, &barrier_state);
1222}
1223
[5bd822a7]1224static void start_low_task(test_context *ctx, size_t i)
[cceb19f4]1225{
1226  rtems_status_code sc;
1227
[5bd822a7]1228  sc = rtems_task_create(
1229    rtems_build_name('L', 'O', 'W', '0' + i),
1230    5,
1231    RTEMS_MINIMUM_STACK_SIZE,
1232    RTEMS_DEFAULT_MODES,
1233    RTEMS_DEFAULT_ATTRIBUTES,
1234    &ctx->low_task_id[i]
1235  );
1236  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1237
1238  sc = rtems_task_set_scheduler(ctx->low_task_id[i], ctx->scheduler_ids[i]);
1239  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1240
1241  sc = rtems_task_start(
1242    ctx->low_task_id[i],
1243    run_task,
1244    (rtems_task_argument) &ctx->low_run[i]
1245  );
1246  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1247}
1248
1249static void test_mrsp_various_block_and_unblock(test_context *ctx)
1250{
1251  rtems_status_code sc;
[cceb19f4]1252
[5bd822a7]1253  puts("test MrsP various block and unblock");
[cceb19f4]1254
1255  change_prio(RTEMS_SELF, 4);
1256
[be0366b]1257  barrier_init(ctx);
[5bd822a7]1258  reset_switch_events(ctx);
1259
1260  ctx->low_run[0] = false;
1261  ctx->low_run[1] = false;
1262  ctx->high_run[0] = false;
1263  ctx->high_run[1] = false;
1264
[cceb19f4]1265  sc = rtems_semaphore_create(
1266    rtems_build_name(' ', ' ', ' ', 'A'),
1267    1,
1268    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
1269      | RTEMS_BINARY_SEMAPHORE,
1270    3,
1271    &ctx->mrsp_ids[0]
1272  );
1273  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1274
1275  assert_prio(RTEMS_SELF, 4);
1276
1277  sc = rtems_task_create(
[5bd822a7]1278    rtems_build_name('H', 'I', 'G', '0'),
[cceb19f4]1279    2,
1280    RTEMS_MINIMUM_STACK_SIZE,
1281    RTEMS_DEFAULT_MODES,
1282    RTEMS_DEFAULT_ATTRIBUTES,
[5bd822a7]1283    &ctx->high_task_id[0]
[cceb19f4]1284  );
1285  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1286
[5bd822a7]1287  sc = rtems_task_create(
1288    rtems_build_name('H', 'I', 'G', '1'),
1289    2,
1290    RTEMS_MINIMUM_STACK_SIZE,
1291    RTEMS_DEFAULT_MODES,
1292    RTEMS_DEFAULT_ATTRIBUTES,
1293    &ctx->high_task_id[1]
1294  );
1295  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1296
1297  sc = rtems_task_set_scheduler(ctx->high_task_id[1], ctx->scheduler_ids[1]);
1298  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1299
[cceb19f4]1300  sc = rtems_task_create(
1301    rtems_build_name('W', 'O', 'R', 'K'),
1302    4,
1303    RTEMS_MINIMUM_STACK_SIZE,
1304    RTEMS_DEFAULT_MODES,
1305    RTEMS_DEFAULT_ATTRIBUTES,
1306    &ctx->worker_ids[0]
1307  );
1308  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1309
1310  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
1311  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1312
[5bd822a7]1313  sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
1314  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1315
[cceb19f4]1316  sc = rtems_timer_create(
1317    rtems_build_name('T', 'I', 'M', 'R'),
1318    &ctx->timer_id
1319  );
1320  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1321
[5bd822a7]1322  /* If these tasks ever run, we have a MrsP protocol violation */
1323  start_low_task(ctx, 0);
1324  start_low_task(ctx, 1);
1325
[cceb19f4]1326  unblock_ready_owner(ctx);
[5bd822a7]1327  various_block_unblock(ctx);
1328
1329  rtems_test_assert(!ctx->low_run[0]);
1330  rtems_test_assert(!ctx->low_run[1]);
1331
1332  print_switch_events(ctx);
[cceb19f4]1333
1334  sc = rtems_timer_delete(ctx->timer_id);
1335  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1336
[5bd822a7]1337  sc = rtems_task_delete(ctx->high_task_id[0]);
1338  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1339
1340  sc = rtems_task_delete(ctx->high_task_id[1]);
1341  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1342
[cceb19f4]1343  sc = rtems_task_delete(ctx->worker_ids[0]);
1344  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1345
[5bd822a7]1346  sc = rtems_task_delete(ctx->low_task_id[0]);
[cceb19f4]1347  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1348
[5bd822a7]1349  sc = rtems_task_delete(ctx->low_task_id[1]);
[cceb19f4]1350  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1351
[5bd822a7]1352  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
[cceb19f4]1353  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1354}
1355
[9f228bea]1356static void test_mrsp_obtain_and_sleep_and_release(test_context *ctx)
1357{
1358  rtems_status_code sc;
1359  rtems_id sem_id;
1360  rtems_id run_task_id;
1361  volatile bool run = false;
1362
1363  puts("test MrsP obtain and sleep and release");
1364
1365  change_prio(RTEMS_SELF, 1);
1366
1367  reset_switch_events(ctx);
1368
1369  sc = rtems_task_create(
1370    rtems_build_name(' ', 'R', 'U', 'N'),
1371    2,
1372    RTEMS_MINIMUM_STACK_SIZE,
1373    RTEMS_DEFAULT_MODES,
1374    RTEMS_DEFAULT_ATTRIBUTES,
1375    &run_task_id
1376  );
1377  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1378
1379  sc = rtems_task_start(run_task_id, run_task, (rtems_task_argument) &run);
1380  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1381
1382  sc = rtems_semaphore_create(
1383    rtems_build_name('S', 'E', 'M', 'A'),
1384    1,
1385    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
1386      | RTEMS_BINARY_SEMAPHORE,
1387    1,
1388    &sem_id
1389  );
1390  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1391
1392  rtems_test_assert(!run);
1393
1394  sc = rtems_task_wake_after(2);
1395  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1396
1397  rtems_test_assert(run);
1398  run = false;
1399
1400  sc = rtems_semaphore_obtain(sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
1401  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1402
1403  rtems_test_assert(!run);
1404
1405  sc = rtems_task_wake_after(2);
1406  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1407
1408  rtems_test_assert(!run);
1409
1410  sc = rtems_semaphore_release(sem_id);
1411  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1412
1413  print_switch_events(ctx);
1414
1415  sc = rtems_semaphore_delete(sem_id);
1416  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1417
1418  sc = rtems_task_delete(run_task_id);
1419  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1420}
1421
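/*
 * Obtains and releases the MrsP semaphore owned by the main task, then busy
 * loops until deleted.
 */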
1422static void help_task(rtems_task_argument arg)
1423{
1424  test_context *ctx = &test_instance;
1425  rtems_status_code sc;
1426
1427  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
1428  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1429
1430  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
1431  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1432
1433  while (true) {
1434    /* Do nothing */
1435  }
1436}
1437
1438static void test_mrsp_obtain_and_release_with_help(test_context *ctx)
1439{
1440  rtems_status_code sc;
1441  rtems_id help_task_id;
1442  rtems_id run_task_id;
1443  volatile bool run = false;
1444
1445  puts("test MrsP obtain and release with help");
1446
1447  change_prio(RTEMS_SELF, 3);
1448
1449  reset_switch_events(ctx);
1450
1451  sc = rtems_semaphore_create(
1452    rtems_build_name('S', 'E', 'M', 'A'),
1453    1,
1454    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
1455      | RTEMS_BINARY_SEMAPHORE,
1456    2,
1457    &ctx->mrsp_ids[0]
1458  );
1459  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1460
1461  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
1462  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1463
1464  assert_prio(RTEMS_SELF, 2);
1465
1466  sc = rtems_task_create(
1467    rtems_build_name('H', 'E', 'L', 'P'),
1468    3,
1469    RTEMS_MINIMUM_STACK_SIZE,
1470    RTEMS_DEFAULT_MODES,
1471    RTEMS_DEFAULT_ATTRIBUTES,
1472    &help_task_id
1473  );
1474  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1475
1476  sc = rtems_task_set_scheduler(
1477    help_task_id,
1478    ctx->scheduler_ids[1]
1479  );
1480  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1481
1482  sc = rtems_task_start(help_task_id, help_task, 0);
1483  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1484
1485  sc = rtems_task_create(
1486    rtems_build_name(' ', 'R', 'U', 'N'),
1487    4,
1488    RTEMS_MINIMUM_STACK_SIZE,
1489    RTEMS_DEFAULT_MODES,
1490    RTEMS_DEFAULT_ATTRIBUTES,
1491    &run_task_id
1492  );
1493  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1494
1495  sc = rtems_task_start(run_task_id, run_task, (rtems_task_argument) &run);
1496  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1497
1498  wait_for_prio(help_task_id, 2);
1499
1500  sc = rtems_task_wake_after(2);
1501  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1502
1503  rtems_test_assert(rtems_get_current_processor() == 0);
1504  rtems_test_assert(!run);
1505
1506  change_prio(run_task_id, 1);
1507
1508  rtems_test_assert(rtems_get_current_processor() == 1);
1509
1510  while (!run) {
1511    /* Wait */
1512  }
1513
1514  sc = rtems_task_wake_after(2);
1515  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1516
1517  rtems_test_assert(rtems_get_current_processor() == 1);
1518
1519  change_prio(run_task_id, 4);
1520
1521  rtems_test_assert(rtems_get_current_processor() == 1);
1522
1523  sc = rtems_task_wake_after(2);
1524  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1525
1526  rtems_test_assert(rtems_get_current_processor() == 1);
1527
[27783f6]1528  /*
1529   * With this operation, scheduler instance 0 now has only the main and the
1530   * idle threads in the ready set.
1531   */
1532  sc = rtems_task_suspend(run_task_id);
1533  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1534
1535  rtems_test_assert(rtems_get_current_processor() == 1);
1536
1537  change_prio(RTEMS_SELF, 1);
1538  change_prio(RTEMS_SELF, 3);
1539
[9f228bea]1540  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
1541  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1542
[27783f6]1543  rtems_test_assert(rtems_get_current_processor() == 0);
1544
[9f228bea]1545  assert_prio(RTEMS_SELF, 3);
1546
1547  wait_for_prio(help_task_id, 3);
1548
1549  print_switch_events(ctx);
1550
1551  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
1552  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1553
1554  sc = rtems_task_delete(help_task_id);
1555  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1556
1557  sc = rtems_task_delete(run_task_id);
1558  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1559}
1560
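/* Simple linear congruential pseudo-random number generator */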
[8fcafdd5]1561static uint32_t simple_random(uint32_t v)
1562{
1563  v *= 1664525;
1564  v += 1013904223;
1565
1566  return v;
1567}
1568
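/* Derive a small timeout (0 to 3 clock ticks) from the pseudo-random value */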
1569static rtems_interval timeout(uint32_t v)
1570{
1571  return (v >> 23) % 4;
1572}
1573
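/*
 * Load test worker: pseudo-randomly either sleeps for one tick or obtains a
 * nested sequence of MrsP semaphores with random timeouts, releases them in
 * reverse order, and updates the per-worker counters.
 */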
1574static void load_worker(rtems_task_argument index)
1575{
1576  test_context *ctx = &test_instance;
1577  rtems_status_code sc;
1578  uint32_t v = index;
1579
1580  while (!ctx->stop_worker[index]) {
1581    uint32_t i = (v >> 13) % MRSP_COUNT;
1582
1583    assert_prio(RTEMS_SELF, 3 + CPU_COUNT + index);
1584
1585    if ((v >> 7) % 1024 == 0) {
1586      /* Give some time to the lower priority tasks */
1587
1588      ++ctx->counters[index].sleep;
1589
1590      sc = rtems_task_wake_after(1);
1591      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
[9f228bea]1592
1593      ++ctx->counters[index].cpu[rtems_get_current_processor()];
[8fcafdd5]1594    } else {
1595      uint32_t n = (v >> 17) % (i + 1);
1596      uint32_t s;
1597      uint32_t t;
1598
1599      /* Nested obtain */
1600      for (s = 0; s <= n; ++s) {
1601        uint32_t k = i - s;
1602
1603        sc = rtems_semaphore_obtain(ctx->mrsp_ids[k], RTEMS_WAIT, timeout(v));
1604        if (sc == RTEMS_SUCCESSFUL) {
1605          ++ctx->counters[index].obtain[n];
1606
1607          assert_prio(RTEMS_SELF, 3 + k);
1608        } else {
1609          rtems_test_assert(sc == RTEMS_TIMEOUT);
1610
1611          ++ctx->counters[index].timeout;
1612
1613          break;
1614        }
1615
[9f228bea]1616        ++ctx->counters[index].cpu[rtems_get_current_processor()];
1617
[8fcafdd5]1618        v = simple_random(v);
1619      }
1620
1621      /* Release in reverse obtain order */
1622      for (t = 0; t < s; ++t) {
1623        uint32_t k = i + t - s + 1;
1624
1625        sc = rtems_semaphore_release(ctx->mrsp_ids[k]);
1626        rtems_test_assert(sc == RTEMS_SUCCESSFUL);
[9f228bea]1627
1628        ++ctx->counters[index].cpu[rtems_get_current_processor()];
[8fcafdd5]1629      }
1630    }
1631
1632    v = simple_random(v);
1633  }
1634
1635  sc = rtems_semaphore_release(ctx->counting_sem_id);
1636  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1637
1638  rtems_task_suspend(RTEMS_SELF);
1639  rtems_test_assert(0);
1640}
1641
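/*
 * Continuously migrates itself to a pseudo-randomly chosen scheduler
 * instance and counts the migrations per processor.
 */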
[9f228bea]1642static void migration_task(rtems_task_argument arg)
[8fcafdd5]1643{
1644  test_context *ctx = &test_instance;
[9f228bea]1645  rtems_status_code sc;
1646  uint32_t cpu_count = rtems_get_processor_count();
1647  uint32_t v = 0xdeadbeef;
1648
1649  while (true) {
1650    uint32_t cpu_index = (v >> 5) % cpu_count;
1651
1652    sc = rtems_task_set_scheduler(RTEMS_SELF, ctx->scheduler_ids[cpu_index]);
1653    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1654
1655    ++ctx->migration_counters[rtems_get_current_processor()];
1656
1657    v = simple_random(v);
1658  }
1659}
1660
1661static void test_mrsp_load(test_context *ctx)
1662{
[8fcafdd5]1663  rtems_status_code sc;
1664  uint32_t cpu_count = rtems_get_processor_count();
1665  uint32_t index;
1666
1667  puts("test MrsP load");
1668
[9f228bea]1669  change_prio(RTEMS_SELF, 2);
1670
1671  sc = rtems_task_create(
1672    rtems_build_name('M', 'I', 'G', 'R'),
1673    2,
1674    RTEMS_MINIMUM_STACK_SIZE,
1675    RTEMS_DEFAULT_MODES,
1676    RTEMS_DEFAULT_ATTRIBUTES,
1677    &ctx->migration_task_id
1678  );
1679  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1680
1681  sc = rtems_task_start(ctx->migration_task_id, migration_task, 0);
1682  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
[8fcafdd5]1683
1684  sc = rtems_semaphore_create(
1685    rtems_build_name('S', 'Y', 'N', 'C'),
1686    0,
1687    RTEMS_COUNTING_SEMAPHORE,
1688    0,
1689    &ctx->counting_sem_id
1690  );
1691  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1692
1693  for (index = 0; index < MRSP_COUNT; ++index) {
1694    sc = rtems_semaphore_create(
1695      'A' + index,
1696      1,
1697      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
1698        | RTEMS_BINARY_SEMAPHORE,
1699      3 + index,
1700      &ctx->mrsp_ids[index]
1701    );
1702    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1703  }
1704
1705  for (index = 0; index < cpu_count; ++index) {
1706    uint32_t a = 2 * index;
1707    uint32_t b = a + 1;
1708
1709    sc = rtems_task_create(
1710      'A' + a,
1711      3 + MRSP_COUNT + a,
1712      RTEMS_MINIMUM_STACK_SIZE,
1713      RTEMS_DEFAULT_MODES,
1714      RTEMS_DEFAULT_ATTRIBUTES,
1715      &ctx->worker_ids[a]
1716    );
1717    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1718
1719    sc = rtems_task_set_scheduler(
1720      ctx->worker_ids[a],
1721      ctx->scheduler_ids[index]
1722    );
1723    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1724
1725    sc = rtems_task_start(
1726      ctx->worker_ids[a],
1727      load_worker,
1728      (rtems_task_argument) a
1729    );
1730    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1731
1732    sc = rtems_task_create(
1733      'A' + b,
1734      3 + MRSP_COUNT + b,
1735      RTEMS_MINIMUM_STACK_SIZE,
1736      RTEMS_DEFAULT_MODES,
1737      RTEMS_DEFAULT_ATTRIBUTES,
1738      &ctx->worker_ids[b]
1739    );
1740    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1741
1742    sc = rtems_task_set_scheduler(
1743      ctx->worker_ids[b],
1744      ctx->scheduler_ids[index]
1745    );
1746    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1747
1748    sc = rtems_task_start(
1749      ctx->worker_ids[b],
1750      load_worker,
1751      (rtems_task_argument) b
1752    );
1753    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1754  }
1755
1756  sc = rtems_task_wake_after(30 * rtems_clock_get_ticks_per_second());
1757  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1758
1759  for (index = 0; index < 2 * cpu_count; ++index) {
1760    ctx->stop_worker[index] = true;
1761  }
1762
1763  for (index = 0; index < 2 * cpu_count; ++index) {
1764    sc = rtems_semaphore_obtain(
1765      ctx->counting_sem_id,
1766      RTEMS_WAIT,
1767      RTEMS_NO_TIMEOUT
1768    );
1769    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1770  }
1771
1772  for (index = 0; index < 2 * cpu_count; ++index) {
1773    sc = rtems_task_delete(ctx->worker_ids[index]);
1774    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1775  }
1776
1777  for (index = 0; index < MRSP_COUNT; ++index) {
1778    sc = rtems_semaphore_delete(ctx->mrsp_ids[index]);
1779    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1780  }
1781
1782  sc = rtems_semaphore_delete(ctx->counting_sem_id);
1783  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1784
[9f228bea]1785  sc = rtems_task_delete(ctx->migration_task_id);
1786  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1787
[8fcafdd5]1788  for (index = 0; index < 2 * cpu_count; ++index) {
1789    uint32_t nest_level;
[9f228bea]1790    uint32_t cpu_index;
[8fcafdd5]1791
1792    printf(
[9f228bea]1793      "worker[%" PRIu32 "]\n"
[8fcafdd5]1794        "  sleep = %" PRIu32 "\n"
1795        "  timeout = %" PRIu32 "\n",
[9f228bea]1796      index,
[8fcafdd5]1797      ctx->counters[index].sleep,
1798      ctx->counters[index].timeout
1799    );
1800
1801    for (nest_level = 0; nest_level < MRSP_COUNT; ++nest_level) {
1802      printf(
1803        "  obtain[%" PRIu32 "] = %" PRIu32 "\n",
1804        nest_level,
1805        ctx->counters[index].obtain[nest_level]
1806      );
1807    }
[9f228bea]1808
1809    for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
1810      printf(
1811        "  cpu[%" PRIu32 "] = %" PRIu32 "\n",
1812        cpu_index,
1813        ctx->counters[index].cpu[cpu_index]
1814      );
1815    }
1816  }
1817
1818  for (index = 0; index < cpu_count; ++index) {
1819    printf(
1820      "migrations[%" PRIu32 "] = %" PRIu32 "\n",
1821      index,
1822      ctx->migration_counters[index]
1823    );
[8fcafdd5]1824  }
1825}
1826
1827static void Init(rtems_task_argument arg)
1828{
1829  test_context *ctx = &test_instance;
1830  rtems_status_code sc;
1831  rtems_resource_snapshot snapshot;
1832  uint32_t cpu_count = rtems_get_processor_count();
1833  uint32_t cpu_index;
1834
1835  TEST_BEGIN();
1836
1837  rtems_resource_snapshot_take(&snapshot);
1838
1839  ctx->main_task_id = rtems_task_self();
1840
[2c14e67]1841  for (cpu_index = 0; cpu_index < MIN(2, cpu_count); ++cpu_index) {
[8fcafdd5]1842    sc = rtems_scheduler_ident(cpu_index, &ctx->scheduler_ids[cpu_index]);
1843    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1844  }
1845
[1ee0d5f]1846  for (cpu_index = 2; cpu_index < cpu_count; ++cpu_index) {
1847    sc = rtems_scheduler_ident(
1848      cpu_index / 2 + 1,
1849      &ctx->scheduler_ids[cpu_index]
1850    );
1851    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1852  }
1853
[8fcafdd5]1854  test_mrsp_flush_error();
1855  test_mrsp_initially_locked_error();
1856  test_mrsp_nested_obtain_error();
[9553e7a6]1857  test_mrsp_unlock_order_error();
[9f228bea]1858  test_mrsp_deadlock_error(ctx);
[9553e7a6]1859  test_mrsp_multiple_obtain();
[2c14e67]1860
1861  if (cpu_count > 1) {
1862    test_mrsp_various_block_and_unblock(ctx);
1863    test_mrsp_obtain_after_migration(ctx);
1864    test_mrsp_obtain_and_sleep_and_release(ctx);
1865    test_mrsp_obtain_and_release_with_help(ctx);
1866    test_mrsp_obtain_and_release(ctx);
1867    test_mrsp_load(ctx);
1868  }
[8fcafdd5]1869
1870  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
1871
1872  TEST_END();
1873  rtems_test_exit(0);
1874}
1875
1876#define CONFIGURE_SMP_APPLICATION
1877
1878#define CONFIGURE_MICROSECONDS_PER_TICK 1000
1879
1880#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
1881#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
1882
[9f228bea]1883#define CONFIGURE_MAXIMUM_TASKS (2 * CPU_COUNT + 2)
[8fcafdd5]1884#define CONFIGURE_MAXIMUM_SEMAPHORES (MRSP_COUNT + 1)
1885#define CONFIGURE_MAXIMUM_MRSP_SEMAPHORES MRSP_COUNT
1886#define CONFIGURE_MAXIMUM_TIMERS 1
1887
1888#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT
1889
1890#define CONFIGURE_SCHEDULER_SIMPLE_SMP
1891
1892#include <rtems/scheduler.h>
1893
1894RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(0);
1895RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(1);
1896RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(2);
1897RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(3);
1898RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(4);
1899RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(5);
1900RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(6);
1901RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(7);
1902RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(8);
1903RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(9);
1904RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(10);
1905RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(11);
1906RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(12);
1907RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(13);
1908RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(14);
1909RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(15);
1910RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(16);
1911
1912#define CONFIGURE_SCHEDULER_CONTROLS \
1913  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(0, 0), \
1914  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(1, 1), \
1915  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(2, 2), \
1916  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(3, 3), \
1917  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(4, 4), \
1918  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(5, 5), \
1919  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(6, 6), \
1920  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(7, 7), \
1921  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(8, 8), \
1922  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(9, 9), \
1923  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(10, 10), \
1924  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(11, 11), \
1925  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(12, 12), \
1926  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(13, 13), \
1927  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(14, 14), \
1928  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(15, 15), \
1929  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(16, 16)
1930
1931#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
1932  RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
[2c14e67]1933  RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
[8fcafdd5]1934  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1935  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1936  RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1937  RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1938  RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1939  RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1940  RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1941  RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1942  RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1943  RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1944  RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1945  RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1946  RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1947  RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1948  RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1949  RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1950  RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1951  RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1952  RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1953  RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1954  RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1955  RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1956  RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1957  RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1958  RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1959  RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1960  RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1961  RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1962  RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
1963  RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
1964
[9f228bea]1965#define CONFIGURE_INITIAL_EXTENSIONS \
1966  { .thread_switch = switch_extension }, \
1967  RTEMS_TEST_INITIAL_EXTENSION
[8fcafdd5]1968
[9f228bea]1969#define CONFIGURE_INIT_TASK_NAME rtems_build_name('M', 'A', 'I', 'N')
[8fcafdd5]1970#define CONFIGURE_INIT_TASK_PRIORITY 2
1971
1972#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
1973
1974#define CONFIGURE_INIT
1975
1976#include <rtems/confdefs.h>