source: rtems/testsuites/smptests/smpmrsp01/init.c @ 864d3475

4.115
Last change on this file since 864d3475 was 864d3475, checked in by Sebastian Huber <sebastian.huber@…>, on 12/17/14 at 14:11:00

smp: Fix timeout for MrsP semaphores

The previous timeout handling was flawed. In case a waiting thread
helped out the owner could use the scheduler node indefinitely long.
Update the resource tree in _MRSP_Timeout() to avoid this issue.

Bug reported by Luca Bonato.

  • Property mode set to 100644
File size: 44.6 KB
Line 
1/*
2 * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#ifdef HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#include <stdio.h>
20#include <inttypes.h>
21
22#include <rtems.h>
23#include <rtems/libcsupport.h>
24#include <rtems/score/schedulersmpimpl.h>
25#include <rtems/score/smpbarrier.h>
26#include <rtems/score/smplock.h>
27
28#define TESTS_USE_PRINTK
29#include "tmacros.h"
30
/* Test name printed in the test framework begin/end banners */
const char rtems_test_name[] = "SMPMRSP 1";

/* Maximum processor count this test is configured for */
#define CPU_COUNT 32

/* Number of MrsP semaphores available in the test context */
#define MRSP_COUNT 32

/* Capacity of the context switch event log (switch_events[]) */
#define SWITCH_EVENT_COUNT 32
38
/*
 * Per-worker statistics record.
 *
 * NOTE(review): the code updating these fields is outside this chunk;
 * the field names suggest sleep/timeout/obtain/per-CPU execution counts —
 * confirm against the stress worker implementation.
 */
typedef struct {
  uint32_t sleep;
  uint32_t timeout;
  uint32_t obtain[MRSP_COUNT];
  uint32_t cpu[CPU_COUNT];
} counter;
45
/*
 * One recorded context switch, captured by switch_extension() under the
 * switch lock.
 */
typedef struct {
  uint32_t cpu_index;                /* Processor on which the switch happened */
  const Thread_Control *executing;   /* Thread being switched away from */
  const Thread_Control *heir;        /* Thread being switched to */
  const Thread_Control *heir_node;   /* Owner of the heir's scheduler node */
  Priority_Control heir_priority;    /* Priority of the heir's scheduler node */
} switch_event;
53
/*
 * Shared state for all test cases.  A single static instance
 * (test_instance) is used; tasks locate it via the global.
 */
typedef struct {
  rtems_id main_task_id;                 /* Receiver of transient events */
  rtems_id migration_task_id;            /* Used by tests outside this chunk */
  rtems_id low_task_id[2];               /* Low priority tasks, one per scheduler */
  rtems_id high_task_id[2];              /* High priority tasks, one per scheduler */
  rtems_id timer_id;                     /* Timer for unblock scenarios */
  rtems_id counting_sem_id;              /* Used by tests outside this chunk */
  rtems_id mrsp_ids[MRSP_COUNT];         /* MrsP semaphore identifiers */
  rtems_id scheduler_ids[CPU_COUNT];     /* One scheduler identifier per CPU */
  rtems_id worker_ids[2 * CPU_COUNT];    /* Worker task identifiers */
  volatile bool stop_worker[CPU_COUNT];  /* Stop flags for stress workers */
  counter counters[2 * CPU_COUNT];       /* Per-worker statistics */
  uint32_t migration_counters[CPU_COUNT];
  Thread_Control *worker_task;           /* Set by obtain_and_release_worker */
  SMP_barrier_Control barrier;           /* Two-party main/worker rendezvous */
  SMP_lock_Control switch_lock;          /* Protects the switch event log */
  size_t switch_index;                   /* Next free slot in switch_events[] */
  switch_event switch_events[32];        /* Context switch event log */
  volatile bool high_run[2];             /* Set by run_task() instances */
  volatile bool low_run[2];              /* Set by run_task() instances */
} test_context;
75
/* The single shared test context; barrier and lock need static initializers */
static test_context test_instance = {
  .barrier = SMP_BARRIER_CONTROL_INITIALIZER,
  .switch_lock = SMP_LOCK_INITIALIZER("test instance switch lock")
};
80
81static void busy_wait(void)
82{
83  rtems_interval later = rtems_clock_tick_later(2);
84
85  while (rtems_clock_tick_before(later)) {
86    /* Wait */
87  }
88}
89
90static void barrier(test_context *ctx, SMP_barrier_State *bs)
91{
92  _SMP_barrier_Wait(&ctx->barrier, bs, 2);
93}
94
95static void barrier_and_delay(test_context *ctx, SMP_barrier_State *bs)
96{
97  barrier(ctx, bs);
98  busy_wait();
99}
100
101static rtems_task_priority get_prio(rtems_id task_id)
102{
103  rtems_status_code sc;
104  rtems_task_priority prio;
105
106  sc = rtems_task_set_priority(task_id, RTEMS_CURRENT_PRIORITY, &prio);
107  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
108
109  return prio;
110}
111
112static void wait_for_prio(rtems_id task_id, rtems_task_priority prio)
113{
114  while (get_prio(task_id) != prio) {
115    /* Wait */
116  }
117}
118
119static void assert_prio(rtems_id task_id, rtems_task_priority expected_prio)
120{
121  rtems_test_assert(get_prio(task_id) == expected_prio);
122}
123
124static void change_prio(rtems_id task_id, rtems_task_priority prio)
125{
126  rtems_status_code sc;
127
128  sc = rtems_task_set_priority(task_id, prio, &prio);
129  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
130}
131
132static void assert_executing_worker(test_context *ctx)
133{
134  rtems_test_assert(
135    _CPU_Context_Get_is_executing(&ctx->worker_task->Registers)
136  );
137}
138
139static void switch_extension(Thread_Control *executing, Thread_Control *heir)
140{
141  test_context *ctx = &test_instance;
142  SMP_lock_Context lock_context;
143  size_t i;
144
145  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
146
147  i = ctx->switch_index;
148  if (i < SWITCH_EVENT_COUNT) {
149    switch_event *e = &ctx->switch_events[i];
150    Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node(heir);
151
152    e->cpu_index = rtems_get_current_processor();
153    e->executing = executing;
154    e->heir = heir;
155    e->heir_node = _Scheduler_Node_get_owner(&node->Base);
156    e->heir_priority = node->priority;
157
158    ctx->switch_index = i + 1;
159  }
160
161  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
162}
163
164static void reset_switch_events(test_context *ctx)
165{
166  SMP_lock_Context lock_context;
167
168  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
169  ctx->switch_index = 0;
170  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
171}
172
173static size_t get_switch_events(test_context *ctx)
174{
175  SMP_lock_Context lock_context;
176  size_t events;
177
178  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
179  events = ctx->switch_index;
180  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
181
182  return events;
183}
184
185static void print_switch_events(test_context *ctx)
186{
187  size_t n = get_switch_events(ctx);
188  size_t i;
189
190  for (i = 0; i < n; ++i) {
191    switch_event *e = &ctx->switch_events[i];
192    char ex[5];
193    char hr[5];
194    char hn[5];
195
196    rtems_object_get_name(e->executing->Object.id, sizeof(ex), &ex[0]);
197    rtems_object_get_name(e->heir->Object.id, sizeof(hr), &hr[0]);
198    rtems_object_get_name(e->heir_node->Object.id, sizeof(hn), &hn[0]);
199
200    printf(
201      "[%" PRIu32 "] %4s -> %4s (prio %3" PRIu32 ", node %4s)\n",
202      e->cpu_index,
203      &ex[0],
204      &hr[0],
205      e->heir_priority,
206      &hn[0]
207    );
208  }
209}
210
211static void run_task(rtems_task_argument arg)
212{
213  volatile bool *run = (volatile bool *) arg;
214
215  while (true) {
216    *run = true;
217  }
218}
219
/*
 * Worker side of test_mrsp_obtain_and_release().  The lettered barriers
 * (A)-(E), (H) pair with identically labelled barriers in the main task;
 * the main task holds the MrsP semaphore during phases (A)-(D).
 */
static void obtain_and_release_worker(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  /* Publish our TCB so the main task can check we keep executing */
  ctx->worker_task = _Thread_Get_executing();

  assert_prio(RTEMS_SELF, 4);

  /* Obtain with timeout (A) */
  barrier(ctx, &barrier_state);

  /* Owner never releases: expect the four-tick timeout to fire */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
  rtems_test_assert(sc == RTEMS_TIMEOUT);

  /* Priority must be restored after the timed-out obtain */
  assert_prio(RTEMS_SELF, 4);

  /* Obtain with priority change and timeout (B) */
  barrier(ctx, &barrier_state);

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
  rtems_test_assert(sc == RTEMS_TIMEOUT);

  /* The main task raised us to 2 while we were waiting */
  assert_prio(RTEMS_SELF, 2);

  /* Restore priority (C) */
  barrier(ctx, &barrier_state);

  /* Obtain without timeout (D) */
  barrier(ctx, &barrier_state);

  /* Main task will release; this obtain must succeed */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Ceiling priority of scheduler B applies while we hold the semaphore */
  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 4);

  /* Obtain and help with timeout (E) */
  barrier(ctx, &barrier_state);

  /* Times out even though we helped the owner (regression for the
   * _MRSP_Timeout() fix described in the commit message) */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
  rtems_test_assert(sc == RTEMS_TIMEOUT);

  assert_prio(RTEMS_SELF, 4);

  sc = rtems_task_suspend(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Worker done (H) */
  barrier(ctx, &barrier_state);

  /* Park until the main task deletes us; must never resume */
  rtems_task_suspend(RTEMS_SELF);
  rtems_test_assert(0);
}
279
/*
 * Main side of the obtain/release test.  Drives the worker (on scheduler B)
 * through timed-out obtains, a priority change during a wait, a successful
 * obtain, and a timeout while the worker helps the owner.  The lettered
 * barriers pair with obtain_and_release_worker().
 */
static void test_mrsp_obtain_and_release(test_context *ctx)
{
  rtems_status_code sc;
  rtems_task_priority prio;
  rtems_id scheduler_id;
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  puts("test MrsP obtain and release");

  change_prio(RTEMS_SELF, 3);

  reset_switch_events(ctx);

  ctx->high_run[0] = false;

  /* Created now, started later in phase (E) */
  sc = rtems_task_create(
    rtems_build_name('H', 'I', 'G', '0'),
    1,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->high_task_id[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Check executing task parameters */

  sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(ctx->scheduler_ids[0] == scheduler_id);

  /* Create a MrsP semaphore object and lock it */

  sc = rtems_semaphore_create(
    rtems_build_name('M', 'R', 'S', 'P'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    2,
    &ctx->mrsp_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Holding the semaphore raises us to the ceiling priority */
  assert_prio(RTEMS_SELF, 2);

  /*
   * The ceiling priority values per scheduler are equal to the value specified
   * for object creation.
   */

  prio = RTEMS_CURRENT_PRIORITY;
  sc = rtems_semaphore_set_priority(
    ctx->mrsp_ids[0],
    ctx->scheduler_ids[0],
    prio,
    &prio
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 2);

  /* Check the old value and set a new ceiling priority for scheduler B */

  prio = 3;
  sc = rtems_semaphore_set_priority(
    ctx->mrsp_ids[0],
    ctx->scheduler_ids[1],
    prio,
    &prio
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 2);

  /* Check the ceiling priority values */

  prio = RTEMS_CURRENT_PRIORITY;
  sc = rtems_semaphore_set_priority(
    ctx->mrsp_ids[0],
    ctx->scheduler_ids[0],
    prio,
    &prio
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 2);

  prio = RTEMS_CURRENT_PRIORITY;
  sc = rtems_semaphore_set_priority(
    ctx->mrsp_ids[0],
    ctx->scheduler_ids[1],
    prio,
    &prio
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 3);

  /* Check that a thread waiting to get ownership remains executing */

  sc = rtems_task_create(
    rtems_build_name('W', 'O', 'R', 'K'),
    4,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->worker_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Worker runs under scheduler B (second processor) */
  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(ctx->worker_ids[0], obtain_and_release_worker, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Obtain with timeout (A) */
  barrier_and_delay(ctx, &barrier_state);

  /* While waiting the worker runs at scheduler B's ceiling (3) */
  assert_prio(ctx->worker_ids[0], 3);
  assert_executing_worker(ctx);

  /* Obtain with priority change and timeout (B) */
  barrier_and_delay(ctx, &barrier_state);

  assert_prio(ctx->worker_ids[0], 3);
  /* Raise the waiting worker's base priority above the ceiling */
  change_prio(ctx->worker_ids[0], 2);
  assert_executing_worker(ctx);

  /* Restore priority (C) */
  barrier(ctx, &barrier_state);

  assert_prio(ctx->worker_ids[0], 2);
  change_prio(ctx->worker_ids[0], 4);

  /* Obtain without timeout (D) */
  barrier_and_delay(ctx, &barrier_state);

  assert_prio(ctx->worker_ids[0], 3);
  assert_executing_worker(ctx);

  /* Hand ownership over to the waiting worker */
  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Check that a timeout works in case the waiting thread actually helps */

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Obtain and help with timeout (E) */
  barrier_and_delay(ctx, &barrier_state);

  /* Preempt us on CPU 0 with the priority 1 task */
  sc = rtems_task_start(
    ctx->high_task_id[0],
    run_task,
    (rtems_task_argument) &ctx->high_run[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* The waiting worker helps us: we now execute on CPU 1 */
  rtems_test_assert(rtems_get_current_processor() == 1);

  while (rtems_get_current_processor() != 0) {
    /* Wait */
  }

  rtems_test_assert(ctx->high_run[0]);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  print_switch_events(ctx);

  /* Worker done (H) */
  barrier(ctx, &barrier_state);

  sc = rtems_task_delete(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
466
467static void test_mrsp_flush_error(void)
468{
469  rtems_status_code sc;
470  rtems_id id;
471
472  puts("test MrsP flush error");
473
474  sc = rtems_semaphore_create(
475    rtems_build_name('M', 'R', 'S', 'P'),
476    1,
477    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
478      | RTEMS_BINARY_SEMAPHORE,
479    1,
480    &id
481  );
482  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
483
484  sc = rtems_semaphore_flush(id);
485  rtems_test_assert(sc == RTEMS_NOT_DEFINED);
486
487  sc = rtems_semaphore_delete(id);
488  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
489}
490
491static void test_mrsp_initially_locked_error(void)
492{
493  rtems_status_code sc;
494  rtems_id id;
495
496  puts("test MrsP initially locked error");
497
498  sc = rtems_semaphore_create(
499    rtems_build_name('M', 'R', 'S', 'P'),
500    0,
501    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
502      | RTEMS_BINARY_SEMAPHORE,
503    1,
504    &id
505  );
506  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
507}
508
509static void test_mrsp_nested_obtain_error(void)
510{
511  rtems_status_code sc;
512  rtems_id id;
513
514  puts("test MrsP nested obtain error");
515
516  sc = rtems_semaphore_create(
517    rtems_build_name('M', 'R', 'S', 'P'),
518    1,
519    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
520      | RTEMS_BINARY_SEMAPHORE,
521    1,
522    &id
523  );
524  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
525
526  sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
527  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
528
529  sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
530  rtems_test_assert(sc == RTEMS_UNSATISFIED);
531
532  sc = rtems_semaphore_release(id);
533  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
534
535  sc = rtems_semaphore_delete(id);
536  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
537}
538
/*
 * MrsP semaphores must be released in LIFO order with respect to their
 * obtain order; an out-of-order release is rejected with
 * RTEMS_INCORRECT_STATE and leaves the semaphore held.
 */
static void test_mrsp_unlock_order_error(void)
{
  rtems_status_code sc;
  rtems_id id_a;
  rtems_id id_b;

  puts("test MrsP unlock order error");

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    1,
    &id_a
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'B'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    1,
    &id_b
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Obtain order: A then B */
  sc = rtems_semaphore_obtain(id_a, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_obtain(id_b, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Releasing A before B violates the required LIFO order */
  sc = rtems_semaphore_release(id_a);
  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);

  /* Correct order: B then A */
  sc = rtems_semaphore_release(id_b);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_release(id_a);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(id_a);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(id_b);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
588
/*
 * Worker side of test_mrsp_deadlock_error(): takes semaphore B, then
 * blocks on A (held by the main task), forming one half of the potential
 * A/B deadlock cycle that the main task then triggers and detects.
 */
static void deadlock_worker(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Blocks here until the main task releases A after its deadlock attempt */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_release(ctx->mrsp_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Tell the main task we are done */
  sc = rtems_event_transient_send(ctx->main_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Park until deleted; must never resume */
  rtems_task_suspend(RTEMS_SELF);
  rtems_test_assert(0);
}
612
/*
 * Main side of the deadlock test: main holds A, worker holds B and waits
 * for A; main's attempt to obtain B would close the cycle and must be
 * rejected with RTEMS_UNSATISFIED instead of deadlocking.
 */
static void test_mrsp_deadlock_error(test_context *ctx)
{
  rtems_status_code sc;
  rtems_task_priority prio = 2;

  puts("test MrsP deadlock error");

  change_prio(RTEMS_SELF, prio);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    prio,
    &ctx->mrsp_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'B'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    prio,
    &ctx->mrsp_ids[1]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_create(
    rtems_build_name('W', 'O', 'R', 'K'),
    prio,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->worker_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(ctx->worker_ids[0], deadlock_worker, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Take A first so the worker will block on it */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Let the worker run and obtain B, then block on A */
  sc = rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Closing the cycle must be detected and refused */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_UNSATISFIED);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Wait for the worker to finish its obtain/release sequence */
  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(ctx->mrsp_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
679
/*
 * Obtain three nested MrsP semaphores with descending ceilings (3, 2, 1)
 * and check the priority of the executing task at each step, both with a
 * base priority of 4 and after a base priority change to 3 while holding
 * all three.  Releases must restore priorities step by step.
 */
static void test_mrsp_multiple_obtain(void)
{
  rtems_status_code sc;
  rtems_id sem_a_id;
  rtems_id sem_b_id;
  rtems_id sem_c_id;

  puts("test MrsP multiple obtain");

  change_prio(RTEMS_SELF, 4);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    3,
    &sem_a_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'B'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    2,
    &sem_b_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'C'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    1,
    &sem_c_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 4);

  /* Each obtain raises us to the respective ceiling: 3, 2, 1 */
  sc = rtems_semaphore_obtain(sem_a_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_obtain(sem_b_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 2);

  sc = rtems_semaphore_obtain(sem_c_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 1);

  /* Each release drops us back one ceiling step */
  sc = rtems_semaphore_release(sem_c_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 2);

  sc = rtems_semaphore_release(sem_b_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_release(sem_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 4);

  /* Same sequence again, but change the base priority while holding all */
  sc = rtems_semaphore_obtain(sem_a_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_obtain(sem_b_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 2);

  sc = rtems_semaphore_obtain(sem_c_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 1);
  /* Base priority change must not override the active ceiling of 1 */
  change_prio(RTEMS_SELF, 3);
  assert_prio(RTEMS_SELF, 1);

  sc = rtems_semaphore_release(sem_c_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 2);

  sc = rtems_semaphore_release(sem_b_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_release(sem_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* New base priority of 3 remains in effect after the last release */
  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_delete(sem_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(sem_b_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(sem_c_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
794
/*
 * Worker side of the various block/unblock test: obtains and releases the
 * MrsP semaphore once, synchronized with the main task via barriers (F)
 * and (G), then spins forever until deleted.
 */
static void ready_unlock_worker(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  assert_prio(RTEMS_SELF, 4);

  /* Obtain (F) */
  barrier(ctx, &barrier_state);

  /* The main task holds the semaphore; we block until it releases */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Priority must be back to normal after the release */
  assert_prio(RTEMS_SELF, 4);

  /* Done (G) */
  barrier(ctx, &barrier_state);

  while (true) {
    /* Do nothing */
  }
}
821
/*
 * Timer service routine for unblock_ready_owner(): starts and manipulates
 * the high priority task so that the main thread's scheduler node ends up
 * in the ready state, then wakes the main task so the unblock path is
 * exercised while the node is ready.
 */
static void unblock_ready_timer(rtems_id timer_id, void *arg)
{
  test_context *ctx = arg;
  rtems_status_code sc;

  /* Preempt the main task with the priority 2 high task */
  sc = rtems_task_start(
    ctx->high_task_id[0],
    run_task,
    (rtems_task_argument) &ctx->high_run[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Suspend/resume cycle keeps the main thread's node in the ready state */
  sc = rtems_task_suspend(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /*
   * At this point the scheduler node of the main thread is in the
   * SCHEDULER_SMP_NODE_READY state and a _Scheduler_SMP_Unblock() operation is
   * performed.
   */
  sc = rtems_event_transient_send(ctx->main_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_suspend(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
851
/*
 * Obtain the MrsP semaphore (raising us to the ceiling of 3), then block
 * on a transient event while the timer routine unblocks us with our
 * scheduler node in the ready state.  The high task must not have run by
 * the time we are back (it is suspended by the timer routine).
 */
static void unblock_ready_owner(test_context *ctx)
{
  rtems_status_code sc;

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Ceiling priority of the semaphore applies */
  assert_prio(RTEMS_SELF, 3);

  sc = rtems_timer_fire_after(ctx->timer_id, 2, unblock_ready_timer, ctx);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Block until unblock_ready_timer() sends the transient event */
  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(!ctx->high_run[0]);
}
869
/*
 * Timer service routine: suspend the owner-side high task first, then the
 * rival-side high task, giving the processors back to the test tasks.
 * The suspend order is the point of the test — see the caller.
 */
static void unblock_owner_before_rival_timer(rtems_id timer_id, void *arg)
{
  test_context *ctx = arg;
  rtems_status_code sc;

  sc = rtems_task_suspend(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_suspend(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
881
/*
 * Timer service routine: mirror image of unblock_owner_before_rival_timer()
 * — suspend the rival-side high task first, then the owner-side one.
 */
static void unblock_owner_after_rival_timer(rtems_id timer_id, void *arg)
{
  test_context *ctx = arg;
  rtems_status_code sc;

  sc = rtems_task_suspend(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_suspend(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
893
/*
 * Exercise several block/unblock sequences while the main task owns the
 * MrsP semaphore and the worker (on scheduler B) waits for it: suspending
 * and resuming the rival worker, the high priority tasks, and finally the
 * owner itself via timer routines.  Barriers (F)/(G) pair with
 * ready_unlock_worker().
 */
static void various_block_unblock(test_context *ctx)
{
  rtems_status_code sc;
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  /* Worker obtain (F) */
  barrier_and_delay(ctx, &barrier_state);

  /* Block the waiting rival */
  sc = rtems_task_suspend(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  busy_wait();

  /* Occupy the rival's processor with the high priority task */
  sc = rtems_task_start(
    ctx->high_task_id[1],
    run_task,
    (rtems_task_argument) &ctx->high_run[1]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  while (!ctx->high_run[1]) {
    /* Do nothing */
  }

  sc = rtems_task_resume(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Try to schedule a blocked active rival */

  sc = rtems_task_suspend(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_suspend(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* The owner must still run on its own processor */
  rtems_test_assert(rtems_get_current_processor() == 0);

  /* Use node of the active rival */

  sc = rtems_task_suspend(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Displaced from CPU 0, we now run on the rival's node on CPU 1 */
  rtems_test_assert(rtems_get_current_processor() == 1);

  sc = rtems_task_suspend(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /*
   * Try to schedule an active rival with an already scheduled active owner
   * user.
   */

  sc = rtems_timer_fire_after(
    ctx->timer_id,
    2,
    unblock_owner_before_rival_timer,
    ctx
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* This will take the processor away from us, the timer will help later */
  sc = rtems_task_resume(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /*
   * Try to schedule an active owner with an already scheduled active rival
   * user.
   */

  sc = rtems_task_resume(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_timer_fire_after(
    ctx->timer_id,
    2,
    unblock_owner_after_rival_timer,
    ctx
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* This will take the processor away from us, the timer will help later */
  sc = rtems_task_resume(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Hand the semaphore to the waiting worker */
  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Back on our home processor with base priority restored */
  rtems_test_assert(rtems_get_current_processor() == 0);

  assert_prio(RTEMS_SELF, 4);

  /* Worker done (G) */
  barrier(ctx, &barrier_state);
}
1000
/*
 * Create and start a low priority (5) task named LOWi on scheduler i.
 * The task just sets low_run[i]; it must never actually run during the
 * various block/unblock test (that would be a protocol violation).
 */
static void start_low_task(test_context *ctx, size_t i)
{
  rtems_status_code sc;

  sc = rtems_task_create(
    rtems_build_name('L', 'O', 'W', '0' + i),
    5,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->low_task_id[i]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_scheduler(ctx->low_task_id[i], ctx->scheduler_ids[i]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(
    ctx->low_task_id[i],
    run_task,
    (rtems_task_argument) &ctx->low_run[i]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
1025
/*
 * Set up two high priority tasks, a worker on scheduler B, two low
 * priority watchdog tasks and a timer, then run the ready-state unblock
 * and the various block/unblock scenarios.  The low tasks running at all
 * would indicate a MrsP protocol violation.
 */
static void test_mrsp_various_block_and_unblock(test_context *ctx)
{
  rtems_status_code sc;

  puts("test MrsP various block and unblock");

  change_prio(RTEMS_SELF, 4);

  reset_switch_events(ctx);

  ctx->low_run[0] = false;
  ctx->low_run[1] = false;
  ctx->high_run[0] = false;
  ctx->high_run[1] = false;

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    3,
    &ctx->mrsp_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 4);

  /* High task 0 stays on scheduler A; started later by the timer routine */
  sc = rtems_task_create(
    rtems_build_name('H', 'I', 'G', '0'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->high_task_id[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* High task 1 runs on scheduler B next to the worker */
  sc = rtems_task_create(
    rtems_build_name('H', 'I', 'G', '1'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->high_task_id[1]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_scheduler(ctx->high_task_id[1], ctx->scheduler_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_create(
    rtems_build_name('W', 'O', 'R', 'K'),
    4,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->worker_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_timer_create(
    rtems_build_name('T', 'I', 'M', 'R'),
    &ctx->timer_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* In case these tasks run, then we have a MrsP protocol violation */
  start_low_task(ctx, 0);
  start_low_task(ctx, 1);

  unblock_ready_owner(ctx);
  various_block_unblock(ctx);

  rtems_test_assert(!ctx->low_run[0]);
  rtems_test_assert(!ctx->low_run[1]);

  print_switch_events(ctx);

  sc = rtems_timer_delete(ctx->timer_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->low_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->low_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
1131
/*
 * Checks the behaviour of a task that sleeps while it owns a MrsP
 * semaphore.  The priority 2 "RUN" task executes during the first sleep
 * (semaphore not owned), but not during the second sleep while the
 * semaphore is owned at ceiling priority 1.
 */
static void test_mrsp_obtain_and_sleep_and_release(test_context *ctx)
{
  rtems_status_code sc;
  rtems_id sem_id;
  rtems_id run_task_id;
  volatile bool run = false; /* set to true by run_task() once it executes */

  puts("test MrsP obtain and sleep and release");

  change_prio(RTEMS_SELF, 1);

  reset_switch_events(ctx);

  /* Busy task with priority 2, below ourself (priority 1) */
  sc = rtems_task_create(
    rtems_build_name(' ', 'R', 'U', 'N'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &run_task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(run_task_id, run_task, (rtems_task_argument) &run);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* MrsP semaphore with ceiling priority 1 */
  sc = rtems_semaphore_create(
    rtems_build_name('S', 'E', 'M', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    1,
    &sem_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(!run);

  /* While we sleep without the semaphore, the lower priority task runs */
  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(run);
  run = false;

  sc = rtems_semaphore_obtain(sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(!run);

  /*
   * While we sleep as the semaphore owner, the lower priority task must
   * not execute.
   */
  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(!run);

  sc = rtems_semaphore_release(sem_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  print_switch_events(ctx);

  sc = rtems_semaphore_delete(sem_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(run_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
1197
/*
 * Helper for test_mrsp_obtain_and_release_with_help(): obtains and
 * releases the first MrsP semaphore, then busy waits until it is
 * deleted by the test case.
 */
static void help_task(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;

  /* Waits here while the test case owns the semaphore */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  while (true) {
    /* Do nothing */
  }
}
1213
/*
 * Checks that a preempted MrsP owner can continue on another processor:
 * while the "HELP" task on scheduler instance 1 waits for the
 * semaphore, we (the owner) are preempted on processor 0 and continue
 * to execute on processor 1 until the semaphore is released.
 */
static void test_mrsp_obtain_and_release_with_help(test_context *ctx)
{
  rtems_status_code sc;
  rtems_id help_task_id;
  rtems_id run_task_id;
  volatile bool run = false; /* set to true by run_task() once it executes */

  puts("test MrsP obtain and release with help");

  change_prio(RTEMS_SELF, 3);

  reset_switch_events(ctx);

  /* MrsP semaphore with ceiling priority 2 */
  sc = rtems_semaphore_create(
    rtems_build_name('S', 'E', 'M', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    2,
    &ctx->mrsp_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Ownership raises us to the ceiling priority */
  assert_prio(RTEMS_SELF, 2);

  /* Waiter for the semaphore, running on scheduler instance 1 */
  sc = rtems_task_create(
    rtems_build_name('H', 'E', 'L', 'P'),
    3,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &help_task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_scheduler(
    help_task_id,
    ctx->scheduler_ids[1]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(help_task_id, help_task, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Filler task on scheduler instance 0, initially below our priority */
  sc = rtems_task_create(
    rtems_build_name(' ', 'R', 'U', 'N'),
    4,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &run_task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(run_task_id, run_task, (rtems_task_argument) &run);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* The help task now waits for the semaphore at the ceiling priority */
  wait_for_prio(help_task_id, 2);

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(rtems_get_current_processor() == 0);
  rtems_test_assert(!run);

  /* Preempt us on processor 0; we migrate to processor 1 via the waiter */
  change_prio(run_task_id, 1);

  rtems_test_assert(rtems_get_current_processor() == 1);

  while (!run) {
    /* Wait */
  }

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(rtems_get_current_processor() == 1);

  /* Lowering the run task again must not move us back */
  change_prio(run_task_id, 4);

  rtems_test_assert(rtems_get_current_processor() == 1);

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(rtems_get_current_processor() == 1);

  /*
   * With this operation the scheduler instance 0 has now only the main and the
   * idle threads in the ready set.
   */
  sc = rtems_task_suspend(run_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(rtems_get_current_processor() == 1);

  /* Exercise priority changes of the owner while it executes with help */
  change_prio(RTEMS_SELF, 1);
  change_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* After the release we are back on processor 0 at our base priority */
  rtems_test_assert(rtems_get_current_processor() == 0);

  assert_prio(RTEMS_SELF, 3);

  wait_for_prio(help_task_id, 3);

  print_switch_events(ctx);

  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(help_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(run_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
1336
/*
 * Advances a pseudo random state by one step of a linear congruential
 * generator (multiplier 1664525, increment 1013904223 — the Numerical
 * Recipes constants).  Arithmetic wraps modulo 2^32.
 */
static uint32_t simple_random(uint32_t v)
{
  return v * UINT32_C(1664525) + UINT32_C(1013904223);
}
1344
1345static rtems_interval timeout(uint32_t v)
1346{
1347  return (v >> 23) % 4;
1348}
1349
1350static void load_worker(rtems_task_argument index)
1351{
1352  test_context *ctx = &test_instance;
1353  rtems_status_code sc;
1354  uint32_t v = index;
1355
1356  while (!ctx->stop_worker[index]) {
1357    uint32_t i = (v >> 13) % MRSP_COUNT;
1358
1359    assert_prio(RTEMS_SELF, 3 + CPU_COUNT + index);
1360
1361    if ((v >> 7) % 1024 == 0) {
1362      /* Give some time to the lower priority tasks */
1363
1364      ++ctx->counters[index].sleep;
1365
1366      sc = rtems_task_wake_after(1);
1367      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1368
1369      ++ctx->counters[index].cpu[rtems_get_current_processor()];
1370    } else {
1371      uint32_t n = (v >> 17) % (i + 1);
1372      uint32_t s;
1373      uint32_t t;
1374
1375      /* Nested obtain */
1376      for (s = 0; s <= n; ++s) {
1377        uint32_t k = i - s;
1378
1379        sc = rtems_semaphore_obtain(ctx->mrsp_ids[k], RTEMS_WAIT, timeout(v));
1380        if (sc == RTEMS_SUCCESSFUL) {
1381          ++ctx->counters[index].obtain[n];
1382
1383          assert_prio(RTEMS_SELF, 3 + k);
1384        } else {
1385          rtems_test_assert(sc == RTEMS_TIMEOUT);
1386
1387          ++ctx->counters[index].timeout;
1388
1389          break;
1390        }
1391
1392        ++ctx->counters[index].cpu[rtems_get_current_processor()];
1393
1394        v = simple_random(v);
1395      }
1396
1397      /* Release in reverse obtain order */
1398      for (t = 0; t < s; ++t) {
1399        uint32_t k = i + t - s + 1;
1400
1401        sc = rtems_semaphore_release(ctx->mrsp_ids[k]);
1402        rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1403
1404        ++ctx->counters[index].cpu[rtems_get_current_processor()];
1405      }
1406    }
1407
1408    v = simple_random(v);
1409  }
1410
1411  sc = rtems_semaphore_release(ctx->counting_sem_id);
1412  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1413
1414  rtems_task_suspend(RTEMS_SELF);
1415  rtems_test_assert(0);
1416}
1417
/*
 * Endlessly migrates itself to a pseudo random scheduler instance and
 * counts on which processor it executed after each migration.  Runs
 * until it is deleted by test_mrsp_load().
 */
static void migration_task(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t v = 0xdeadbeef; /* pseudo random state */

  while (true) {
    uint32_t cpu_index = (v >> 5) % cpu_count;

    sc = rtems_task_set_scheduler(RTEMS_SELF, ctx->scheduler_ids[cpu_index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    ++ctx->migration_counters[rtems_get_current_processor()];

    v = simple_random(v);
  }
}
1436
/*
 * Stress test: two load workers per processor plus a migrating task
 * perform pseudo random nested obtain/release cycles on the MrsP
 * semaphores for 30 seconds.  Afterwards the per-worker and migration
 * statistics are printed and all objects are deleted.
 */
static void test_mrsp_load(test_context *ctx)
{
  rtems_status_code sc;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t index;

  puts("test MrsP load");

  change_prio(RTEMS_SELF, 2);

  sc = rtems_task_create(
    rtems_build_name('M', 'I', 'G', 'R'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->migration_task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(ctx->migration_task_id, migration_task, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Counting semaphore used by the workers to signal their termination */
  sc = rtems_semaphore_create(
    rtems_build_name('S', 'Y', 'N', 'C'),
    0,
    RTEMS_COUNTING_SEMAPHORE,
    0,
    &ctx->counting_sem_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* MrsP semaphore with index i gets ceiling priority 3 + i */
  for (index = 0; index < MRSP_COUNT; ++index) {
    sc = rtems_semaphore_create(
      'A' + index,
      1,
      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
        | RTEMS_BINARY_SEMAPHORE,
      3 + index,
      &ctx->mrsp_ids[index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* Two workers per processor, with priorities below all ceilings */
  for (index = 0; index < cpu_count; ++index) {
    uint32_t a = 2 * index;
    uint32_t b = a + 1;

    sc = rtems_task_create(
      'A' + a,
      3 + MRSP_COUNT + a,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->worker_ids[a]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(
      ctx->worker_ids[a],
      ctx->scheduler_ids[index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(
      ctx->worker_ids[a],
      load_worker,
      (rtems_task_argument) a
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_create(
      'A' + b,
      3 + MRSP_COUNT + b,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->worker_ids[b]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(
      ctx->worker_ids[b],
      ctx->scheduler_ids[index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(
      ctx->worker_ids[b],
      load_worker,
      (rtems_task_argument) b
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* Let the load run for 30 seconds */
  sc = rtems_task_wake_after(30 * rtems_clock_get_ticks_per_second());
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Stop the workers and wait until each one has signalled termination */
  for (index = 0; index < 2 * cpu_count; ++index) {
    ctx->stop_worker[index] = true;
  }

  for (index = 0; index < 2 * cpu_count; ++index) {
    sc = rtems_semaphore_obtain(
      ctx->counting_sem_id,
      RTEMS_WAIT,
      RTEMS_NO_TIMEOUT
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  for (index = 0; index < 2 * cpu_count; ++index) {
    sc = rtems_task_delete(ctx->worker_ids[index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  for (index = 0; index < MRSP_COUNT; ++index) {
    sc = rtems_semaphore_delete(ctx->mrsp_ids[index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_semaphore_delete(ctx->counting_sem_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->migration_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Print the per-worker statistics */
  for (index = 0; index < 2 * cpu_count; ++index) {
    uint32_t nest_level;
    uint32_t cpu_index;

    printf(
      "worker[%" PRIu32 "]\n"
        "  sleep = %" PRIu32 "\n"
        "  timeout = %" PRIu32 "\n",
      index,
      ctx->counters[index].sleep,
      ctx->counters[index].timeout
    );

    for (nest_level = 0; nest_level < MRSP_COUNT; ++nest_level) {
      printf(
        "  obtain[%" PRIu32 "] = %" PRIu32 "\n",
        nest_level,
        ctx->counters[index].obtain[nest_level]
      );
    }

    for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
      printf(
        "  cpu[%" PRIu32 "] = %" PRIu32 "\n",
        cpu_index,
        ctx->counters[index].cpu[cpu_index]
      );
    }
  }

  /* Print the migration task statistics */
  for (index = 0; index < cpu_count; ++index) {
    printf(
      "migrations[%" PRIu32 "] = %" PRIu32 "\n",
      index,
      ctx->migration_counters[index]
    );
  }
}
1602
/*
 * Test driver: determines the scheduler identifiers (processors 0 and 1
 * each have a dedicated scheduler instance, the remaining processors
 * share one instance per processor pair, see the configuration below)
 * and runs the MrsP test cases.  A resource snapshot checks that all
 * test cases clean up after themselves.
 */
static void Init(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  rtems_resource_snapshot snapshot;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu_index;

  TEST_BEGIN();

  rtems_resource_snapshot_take(&snapshot);

  ctx->main_task_id = rtems_task_self();

  /* Processors 0 and 1 use scheduler instances 0 and 1 */
  for (cpu_index = 0; cpu_index < 2; ++cpu_index) {
    sc = rtems_scheduler_ident(cpu_index, &ctx->scheduler_ids[cpu_index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* Processors 2 and 3 use scheduler instance 2, 4 and 5 instance 3, ... */
  for (cpu_index = 2; cpu_index < cpu_count; ++cpu_index) {
    sc = rtems_scheduler_ident(
      cpu_index / 2 + 1,
      &ctx->scheduler_ids[cpu_index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  test_mrsp_flush_error();
  test_mrsp_initially_locked_error();
  test_mrsp_nested_obtain_error();
  test_mrsp_unlock_order_error();
  test_mrsp_deadlock_error(ctx);
  test_mrsp_multiple_obtain();
  test_mrsp_various_block_and_unblock(ctx);
  test_mrsp_obtain_and_sleep_and_release(ctx);
  test_mrsp_obtain_and_release_with_help(ctx);
  test_mrsp_obtain_and_release(ctx);
  test_mrsp_load(ctx);

  /* All objects created by the test cases must have been deleted */
  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));

  TEST_END();
  rtems_test_exit(0);
}
1647
#define CONFIGURE_SMP_APPLICATION

/* 1 ms clock tick */
#define CONFIGURE_MICROSECONDS_PER_TICK 1000

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER

/* Two load workers per processor plus the migration and Init tasks */
#define CONFIGURE_MAXIMUM_TASKS (2 * CPU_COUNT + 2)
/* The MrsP semaphores plus the counting synchronization semaphore */
#define CONFIGURE_MAXIMUM_SEMAPHORES (MRSP_COUNT + 1)
#define CONFIGURE_MAXIMUM_MRSP_SEMAPHORES MRSP_COUNT
#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT
1661
#define CONFIGURE_SCHEDULER_SIMPLE_SMP

#include <rtems/scheduler.h>

/*
 * Seventeen scheduler instances: one each for processors 0 and 1 and
 * one for each remaining pair of processors (see the assignments below
 * and Init()).
 */
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(0);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(1);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(2);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(3);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(4);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(5);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(6);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(7);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(8);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(9);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(10);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(11);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(12);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(13);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(14);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(15);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(16);
/* One control entry per scheduler context declared above */
#define CONFIGURE_SCHEDULER_CONTROLS \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(0, 0), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(1, 1), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(2, 2), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(3, 3), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(4, 4), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(5, 5), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(6, 6), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(7, 7), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(8, 8), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(9, 9), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(10, 10), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(11, 11), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(12, 12), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(13, 13), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(14, 14), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(15, 15), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(16, 16)
1702
/*
 * Processors 0 and 1 are mandatory for scheduler instances 0 and 1;
 * each of the scheduler instances 2 to 16 may serve two of the
 * remaining processors.
 */
#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
  RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
  RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
1736
/* Record thread switches for print_switch_events() */
#define CONFIGURE_INITIAL_EXTENSIONS \
  { .thread_switch = switch_extension }, \
  RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_INIT_TASK_NAME rtems_build_name('M', 'A', 'I', 'N')
#define CONFIGURE_INIT_TASK_PRIORITY 2

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.