source: rtems/testsuites/smptests/smpmrsp01/init.c @ 5bd822a7

4.115
Last change on this file since 5bd822a7 was 5bd822a7, checked in by Sebastian Huber <sebastian.huber@…>, on Nov 26, 2014 at 10:51:34 AM

smp: Fix scheduler helping protocol

Ensure that scheduler nodes in the SCHEDULER_HELP_ACTIVE_OWNER or
SCHEDULER_HELP_ACTIVE_RIVAL helping state are always
SCHEDULER_SMP_NODE_READY or SCHEDULER_SMP_NODE_SCHEDULED to ensure the
MrsP protocol properties.

  • Property mode set to 100644
File size: 43.2 KB
Line 
1/*
2 * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#ifdef HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#include <stdio.h>
20#include <inttypes.h>
21
22#include <rtems.h>
23#include <rtems/libcsupport.h>
24#include <rtems/score/schedulersmpimpl.h>
25#include <rtems/score/smpbarrier.h>
26#include <rtems/score/smplock.h>
27
28#define TESTS_USE_PRINTK
29#include "tmacros.h"
30
const char rtems_test_name[] = "SMPMRSP 1";

/* Maximum number of processors this test is configured for */
#define CPU_COUNT 32

/* Number of MrsP semaphore slots in the test context */
#define MRSP_COUNT 32

/* Capacity of the context switch event log */
#define SWITCH_EVENT_COUNT 32
38
/*
 * Per-worker activity counters.  The increment sites are in worker bodies
 * outside this chunk — field semantics inferred from names, verify against
 * the workers that fill them in.
 */
typedef struct {
  uint32_t sleep;
  uint32_t timeout;
  uint32_t obtain[MRSP_COUNT];
  uint32_t cpu[CPU_COUNT];
} counter;

/*
 * One recorded thread switch.  Filled in by switch_extension(): cpu_index is
 * the processor on which the switch happened, executing/heir are the threads
 * switched from/to, heir_node is the owner thread of the heir's scheduler
 * node, and heir_priority is that node's priority.
 */
typedef struct {
  uint32_t cpu_index;
  const Thread_Control *executing;
  const Thread_Control *heir;
  const Thread_Control *heir_node;
  Priority_Control heir_priority;
} switch_event;

/* Shared state of the whole test, used by all tasks and timer routines */
typedef struct {
  rtems_id main_task_id;
  rtems_id migration_task_id;
  rtems_id low_task_id[2];
  rtems_id high_task_id[2];
  rtems_id timer_id;
  rtems_id counting_sem_id;
  rtems_id mrsp_ids[MRSP_COUNT];
  rtems_id scheduler_ids[CPU_COUNT];
  rtems_id worker_ids[2 * CPU_COUNT];
  volatile bool stop_worker[CPU_COUNT];
  counter counters[2 * CPU_COUNT];
  uint32_t migration_counters[CPU_COUNT];
  Thread_Control *worker_task;
  SMP_barrier_Control barrier;
  SMP_lock_Control switch_lock;
  size_t switch_index;              /* next free slot in switch_events */
  switch_event switch_events[32];   /* log guarded by switch_lock */
  volatile bool high_run[2];
  volatile bool low_run[2];
} test_context;

/* Single test instance; barrier and switch lock are statically initialized */
static test_context test_instance = {
  .barrier = SMP_BARRIER_CONTROL_INITIALIZER,
  .switch_lock = SMP_LOCK_INITIALIZER("test instance switch lock")
};
80
81static void barrier(test_context *ctx, SMP_barrier_State *bs)
82{
83  _SMP_barrier_Wait(&ctx->barrier, bs, 2);
84}
85
86static rtems_task_priority get_prio(rtems_id task_id)
87{
88  rtems_status_code sc;
89  rtems_task_priority prio;
90
91  sc = rtems_task_set_priority(task_id, RTEMS_CURRENT_PRIORITY, &prio);
92  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
93
94  return prio;
95}
96
97static void wait_for_prio(rtems_id task_id, rtems_task_priority prio)
98{
99  while (get_prio(task_id) != prio) {
100    /* Wait */
101  }
102}
103
104static void assert_prio(rtems_id task_id, rtems_task_priority expected_prio)
105{
106  rtems_test_assert(get_prio(task_id) == expected_prio);
107}
108
109static void change_prio(rtems_id task_id, rtems_task_priority prio)
110{
111  rtems_status_code sc;
112
113  sc = rtems_task_set_priority(task_id, prio, &prio);
114  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
115}
116
117static void assert_executing_worker(test_context *ctx)
118{
119  rtems_test_assert(
120    _CPU_Context_Get_is_executing(&ctx->worker_task->Registers)
121  );
122}
123
124static void switch_extension(Thread_Control *executing, Thread_Control *heir)
125{
126  test_context *ctx = &test_instance;
127  SMP_lock_Context lock_context;
128  size_t i;
129
130  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
131
132  i = ctx->switch_index;
133  if (i < SWITCH_EVENT_COUNT) {
134    switch_event *e = &ctx->switch_events[i];
135    Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node(heir);
136
137    e->cpu_index = rtems_get_current_processor();
138    e->executing = executing;
139    e->heir = heir;
140    e->heir_node = _Scheduler_Node_get_owner(&node->Base);
141    e->heir_priority = node->priority;
142
143    ctx->switch_index = i + 1;
144  }
145
146  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
147}
148
149static void reset_switch_events(test_context *ctx)
150{
151  SMP_lock_Context lock_context;
152
153  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
154  ctx->switch_index = 0;
155  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
156}
157
158static size_t get_switch_events(test_context *ctx)
159{
160  SMP_lock_Context lock_context;
161  size_t events;
162
163  _SMP_lock_ISR_disable_and_acquire(&ctx->switch_lock, &lock_context);
164  events = ctx->switch_index;
165  _SMP_lock_Release_and_ISR_enable(&ctx->switch_lock, &lock_context);
166
167  return events;
168}
169
170static void print_switch_events(test_context *ctx)
171{
172  size_t n = get_switch_events(ctx);
173  size_t i;
174
175  for (i = 0; i < n; ++i) {
176    switch_event *e = &ctx->switch_events[i];
177    char ex[5];
178    char hr[5];
179    char hn[5];
180
181    rtems_object_get_name(e->executing->Object.id, sizeof(ex), &ex[0]);
182    rtems_object_get_name(e->heir->Object.id, sizeof(hr), &hr[0]);
183    rtems_object_get_name(e->heir_node->Object.id, sizeof(hn), &hn[0]);
184
185    printf(
186      "[%" PRIu32 "] %4s -> %4s (prio %3" PRIu32 ", node %4s)\n",
187      e->cpu_index,
188      &ex[0],
189      &hr[0],
190      e->heir_priority,
191      &hn[0]
192    );
193  }
194}
195
/*
 * Worker for test_mrsp_obtain_and_release().  Runs on scheduler B and
 * synchronizes with the main task via the test barrier at points (A)-(E).
 * The main task holds mrsp_ids[0] throughout (A)-(D), so the obtains here
 * either time out or block until the main task releases at (D)/(E).
 */
static void obtain_and_release_worker(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  /* Publish our TCB so the main task can check we stay executing */
  ctx->worker_task = _Thread_Get_executing();

  assert_prio(RTEMS_SELF, 3);

  /* Obtain with timeout (A) */
  barrier(ctx, &barrier_state);

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
  rtems_test_assert(sc == RTEMS_TIMEOUT);

  /* Priority must be back to the base priority after the timeout */
  assert_prio(RTEMS_SELF, 3);

  /* Obtain with priority change and timeout (B) */
  barrier(ctx, &barrier_state);

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
  rtems_test_assert(sc == RTEMS_TIMEOUT);

  /* The main task raised us to priority 1 while we were waiting */
  assert_prio(RTEMS_SELF, 1);

  /* Restore priority (C) */
  barrier(ctx, &barrier_state);

  /* Obtain without timeout (D) */
  barrier(ctx, &barrier_state);

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* While owning the semaphore we run at scheduler B's ceiling (2) */
  assert_prio(RTEMS_SELF, 2);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 3);

  /* Worker done (E) */
  barrier(ctx, &barrier_state);

  /* Park until the main task deletes us; must never resume */
  rtems_task_suspend(RTEMS_SELF);
  rtems_test_assert(0);
}
244
/*
 * Basic MrsP obtain/release scenario: verifies the per-scheduler ceiling
 * priorities of a MrsP semaphore and that a rival waiting for ownership
 * remains executing (spinning) on its processor.  Synchronizes with
 * obtain_and_release_worker() via the barrier at points (A)-(E).
 */
static void test_mrsp_obtain_and_release(test_context *ctx)
{
  rtems_status_code sc;
  rtems_task_priority prio;
  rtems_id scheduler_id;
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  puts("test MrsP obtain and release");

  change_prio(RTEMS_SELF, 2);

  /* Check executing task parameters */

  sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(ctx->scheduler_ids[0] == scheduler_id);

  /* Create a MrsP semaphore object and lock it */

  sc = rtems_semaphore_create(
    rtems_build_name('M', 'R', 'S', 'P'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    1,
    &ctx->mrsp_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 2);

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Owning the semaphore raises us to its ceiling priority */
  assert_prio(RTEMS_SELF, 1);

  /*
   * The ceiling priority values per scheduler are equal to the value specified
   * for object creation.
   */

  prio = RTEMS_CURRENT_PRIORITY;
  sc = rtems_semaphore_set_priority(
    ctx->mrsp_ids[0],
    ctx->scheduler_ids[0],
    prio,
    &prio
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 1);

  /* Check the old value and set a new ceiling priority for scheduler B */

  prio = 2;
  sc = rtems_semaphore_set_priority(
    ctx->mrsp_ids[0],
    ctx->scheduler_ids[1],
    prio,
    &prio
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 1);

  /* Check the ceiling priority values */

  prio = RTEMS_CURRENT_PRIORITY;
  sc = rtems_semaphore_set_priority(
    ctx->mrsp_ids[0],
    ctx->scheduler_ids[0],
    prio,
    &prio
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 1);

  prio = RTEMS_CURRENT_PRIORITY;
  sc = rtems_semaphore_set_priority(
    ctx->mrsp_ids[0],
    ctx->scheduler_ids[1],
    prio,
    &prio
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 2);

  /* Check that a thread waiting to get ownership remains executing */

  sc = rtems_task_create(
    rtems_build_name('W', 'O', 'R', 'K'),
    3,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->worker_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Run the worker on scheduler B (the other processor) */
  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(ctx->worker_ids[0], obtain_and_release_worker, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Obtain with timeout (A) */
  barrier(ctx, &barrier_state);

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* While waiting, the worker runs at scheduler B's ceiling (2) */
  assert_prio(ctx->worker_ids[0], 2);
  assert_executing_worker(ctx);

  /* Obtain with priority change and timeout (B) */
  barrier(ctx, &barrier_state);

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(ctx->worker_ids[0], 2);
  /* Raise the waiting worker; it must keep spinning while waiting */
  change_prio(ctx->worker_ids[0], 1);
  assert_executing_worker(ctx);

  /* Restore priority (C) */
  barrier(ctx, &barrier_state);

  assert_prio(ctx->worker_ids[0], 1);
  change_prio(ctx->worker_ids[0], 3);

  /* Obtain without timeout (D) */
  barrier(ctx, &barrier_state);

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(ctx->worker_ids[0], 2);
  assert_executing_worker(ctx);

  /* Hand ownership over to the waiting worker */
  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Worker done (E) */
  barrier(ctx, &barrier_state);

  sc = rtems_task_delete(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
395
396static void test_mrsp_flush_error(void)
397{
398  rtems_status_code sc;
399  rtems_id id;
400
401  puts("test MrsP flush error");
402
403  sc = rtems_semaphore_create(
404    rtems_build_name('M', 'R', 'S', 'P'),
405    1,
406    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
407      | RTEMS_BINARY_SEMAPHORE,
408    1,
409    &id
410  );
411  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
412
413  sc = rtems_semaphore_flush(id);
414  rtems_test_assert(sc == RTEMS_NOT_DEFINED);
415
416  sc = rtems_semaphore_delete(id);
417  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
418}
419
420static void test_mrsp_initially_locked_error(void)
421{
422  rtems_status_code sc;
423  rtems_id id;
424
425  puts("test MrsP initially locked error");
426
427  sc = rtems_semaphore_create(
428    rtems_build_name('M', 'R', 'S', 'P'),
429    0,
430    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
431      | RTEMS_BINARY_SEMAPHORE,
432    1,
433    &id
434  );
435  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
436}
437
438static void test_mrsp_nested_obtain_error(void)
439{
440  rtems_status_code sc;
441  rtems_id id;
442
443  puts("test MrsP nested obtain error");
444
445  sc = rtems_semaphore_create(
446    rtems_build_name('M', 'R', 'S', 'P'),
447    1,
448    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
449      | RTEMS_BINARY_SEMAPHORE,
450    1,
451    &id
452  );
453  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
454
455  sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
456  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
457
458  sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
459  rtems_test_assert(sc == RTEMS_UNSATISFIED);
460
461  sc = rtems_semaphore_release(id);
462  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
463
464  sc = rtems_semaphore_delete(id);
465  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
466}
467
468static void test_mrsp_unlock_order_error(void)
469{
470  rtems_status_code sc;
471  rtems_id id_a;
472  rtems_id id_b;
473
474  puts("test MrsP unlock order error");
475
476  sc = rtems_semaphore_create(
477    rtems_build_name(' ', ' ', ' ', 'A'),
478    1,
479    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
480      | RTEMS_BINARY_SEMAPHORE,
481    1,
482    &id_a
483  );
484  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
485
486  sc = rtems_semaphore_create(
487    rtems_build_name(' ', ' ', ' ', 'B'),
488    1,
489    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
490      | RTEMS_BINARY_SEMAPHORE,
491    1,
492    &id_b
493  );
494  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
495
496  sc = rtems_semaphore_obtain(id_a, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
497  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
498
499  sc = rtems_semaphore_obtain(id_b, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
500  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
501
502  sc = rtems_semaphore_release(id_a);
503  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
504
505  sc = rtems_semaphore_release(id_b);
506  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
507
508  sc = rtems_semaphore_release(id_a);
509  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
510
511  sc = rtems_semaphore_delete(id_a);
512  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
513
514  sc = rtems_semaphore_delete(id_b);
515  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
516}
517
/*
 * Worker for test_mrsp_deadlock_error().  Obtains semaphore B first and then
 * A — the opposite nesting order of the main task, which holds A and tries B.
 * The main task's attempt is refused (deadlock detection); afterwards this
 * worker completes B -> A normally and signals the main task.
 */
static void deadlock_worker(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Blocks until the main task releases A after its deadlock rejection */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_release(ctx->mrsp_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Tell the main task we are done */
  sc = rtems_event_transient_send(ctx->main_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Park until deleted; must never resume */
  rtems_task_suspend(RTEMS_SELF);
  rtems_test_assert(0);
}
541
/*
 * Verifies MrsP deadlock detection: the main task holds A and requests B
 * while the worker holds B and requests A.  The second request of the cycle
 * is refused with RTEMS_UNSATISFIED instead of deadlocking.
 */
static void test_mrsp_deadlock_error(test_context *ctx)
{
  rtems_status_code sc;
  rtems_task_priority prio = 2;

  puts("test MrsP deadlock error");

  change_prio(RTEMS_SELF, prio);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    prio,
    &ctx->mrsp_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'B'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    prio,
    &ctx->mrsp_ids[1]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_create(
    rtems_build_name('W', 'O', 'R', 'K'),
    prio,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->worker_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(ctx->worker_ids[0], deadlock_worker, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Take A; the worker (same priority) takes B */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Let the worker run so it can obtain B and block on A */
  sc = rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Requesting B now would close the cycle — must be refused */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_UNSATISFIED);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Wait for the worker to finish its B -> A sequence */
  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(ctx->mrsp_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
608
/*
 * Verifies nested obtains of three MrsP semaphores with descending ceilings
 * (3, 2, 1): each obtain raises the effective priority to the new ceiling
 * and each release restores the previous one.  Also checks that a base
 * priority change while holding semaphores does not disturb the current
 * ceiling-elevated priority and only takes effect after the releases.
 */
static void test_mrsp_multiple_obtain(void)
{
  rtems_status_code sc;
  rtems_id sem_a_id;
  rtems_id sem_b_id;
  rtems_id sem_c_id;

  puts("test MrsP multiple obtain");

  change_prio(RTEMS_SELF, 4);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    3,
    &sem_a_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'B'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    2,
    &sem_b_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'C'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    1,
    &sem_c_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 4);

  /* Obtain A -> B -> C; the priority steps up the ceilings 3 -> 2 -> 1 */
  sc = rtems_semaphore_obtain(sem_a_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_obtain(sem_b_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 2);

  sc = rtems_semaphore_obtain(sem_c_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 1);

  /* Release in reverse order; the priority steps back down */
  sc = rtems_semaphore_release(sem_c_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 2);

  sc = rtems_semaphore_release(sem_b_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_release(sem_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 4);

  /* Same nesting again, this time with a base priority change in between */
  sc = rtems_semaphore_obtain(sem_a_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_obtain(sem_b_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 2);

  sc = rtems_semaphore_obtain(sem_c_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 1);
  /* New base priority 4 -> 3 is deferred while ceilings are in effect */
  change_prio(RTEMS_SELF, 3);
  assert_prio(RTEMS_SELF, 1);

  sc = rtems_semaphore_release(sem_c_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 2);

  sc = rtems_semaphore_release(sem_b_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_release(sem_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* After the last release the new base priority (3) is in effect */
  assert_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_delete(sem_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(sem_b_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(sem_c_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
723
724static void run_task(rtems_task_argument arg)
725{
726  volatile bool *run = (volatile bool *) arg;
727
728  while (true) {
729    *run = true;
730  }
731}
732
/*
 * Worker for test_mrsp_various_block_and_unblock().  Obtains and releases
 * mrsp_ids[0] once, bracketed by the barrier points (F) and (G), then spins
 * forever until it is deleted by the main task.
 */
static void ready_unlock_worker(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  assert_prio(RTEMS_SELF, 4);

  /* Obtain (F) */
  barrier(ctx, &barrier_state);

  /* Blocks/spins until the main task releases the semaphore */
  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Base priority must be restored after the release */
  assert_prio(RTEMS_SELF, 4);

  /* Done (G) */
  barrier(ctx, &barrier_state);

  while (true) {
    /* Do nothing */
  }
}
759
/*
 * Timer routine for unblock_ready_owner().  Starts the high priority task to
 * preempt the main task, bounces it via suspend/resume, and then signals the
 * main task so that its unblock happens while its scheduler node is READY.
 * Finally suspends the high priority task again.
 */
static void unblock_ready_timer(rtems_id timer_id, void *arg)
{
  test_context *ctx = arg;
  rtems_status_code sc;

  sc = rtems_task_start(
    ctx->high_task_id[0],
    run_task,
    (rtems_task_argument) &ctx->high_run[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_suspend(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /*
   * At this point the scheduler node of the main thread is in the
   * SCHEDULER_SMP_NODE_READY state and a _Scheduler_SMP_Unblock() operation is
   * performed.
   */
  sc = rtems_event_transient_send(ctx->main_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_suspend(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
789
/*
 * Obtains mrsp_ids[0] as owner, then blocks on a transient event which the
 * timer routine (unblock_ready_timer) delivers while this task's scheduler
 * node is READY — exercising the unblock of a ready resource owner.  The
 * high priority task must not have run to completion of the check.
 */
static void unblock_ready_owner(test_context *ctx)
{
  rtems_status_code sc;

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Owning the semaphore raises us to its ceiling priority (3) */
  assert_prio(RTEMS_SELF, 3);

  sc = rtems_timer_fire_after(ctx->timer_id, 2, unblock_ready_timer, ctx);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Block; the timer routine unblocks us while our node is READY */
  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(!ctx->high_run[0]);
}
807
808static void unblock_owner_before_rival_timer(rtems_id timer_id, void *arg)
809{
810  test_context *ctx = arg;
811  rtems_status_code sc;
812
813  sc = rtems_task_suspend(ctx->high_task_id[0]);
814  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
815
816  sc = rtems_task_suspend(ctx->high_task_id[1]);
817  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
818}
819
820static void unblock_owner_after_rival_timer(rtems_id timer_id, void *arg)
821{
822  test_context *ctx = arg;
823  rtems_status_code sc;
824
825  sc = rtems_task_suspend(ctx->high_task_id[1]);
826  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
827
828  sc = rtems_task_suspend(ctx->high_task_id[0]);
829  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
830}
831
/*
 * Drives a series of block/unblock operations against the worker (an active
 * rival spinning for mrsp_ids[0]) and the two high priority tasks, covering
 * several scheduler helping protocol cases.  The statement order is the
 * test — do not reorder.  Synchronizes with ready_unlock_worker() at barrier
 * points (F) and (G).
 */
static void various_block_unblock(test_context *ctx)
{
  rtems_status_code sc;
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  /* Worker obtain (F) */
  barrier(ctx, &barrier_state);

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Block the spinning rival, then let it sit suspended for a while */
  sc = rtems_task_suspend(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(
    ctx->high_task_id[1],
    run_task,
    (rtems_task_argument) &ctx->high_run[1]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Wait until the high priority task actually runs on scheduler B */
  while (!ctx->high_run[1]) {
    /* Do nothing */
  }

  sc = rtems_task_resume(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Try to schedule a blocked active rival */

  sc = rtems_task_suspend(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_suspend(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(rtems_get_current_processor() == 0);

  /* Use node of the active rival */

  sc = rtems_task_suspend(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* We were pushed to processor 1 via the rival's scheduler node */
  rtems_test_assert(rtems_get_current_processor() == 1);

  sc = rtems_task_suspend(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_resume(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /*
   * Try to schedule an active rival with an already scheduled active owner
   * user.
   */

  sc = rtems_timer_fire_after(
    ctx->timer_id,
    2,
    unblock_owner_before_rival_timer,
    ctx
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* This will take the processor away from us, the timer will help later */
  sc = rtems_task_resume(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /*
   * Try to schedule an active owner with an already scheduled active rival
   * user.
   */

  sc = rtems_task_resume(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_timer_fire_after(
    ctx->timer_id,
    2,
    unblock_owner_after_rival_timer,
    ctx
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* This will take the processor away from us, the timer will help later */
  sc = rtems_task_resume(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Hand the semaphore to the worker; we return to processor 0 */
  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(rtems_get_current_processor() == 0);

  assert_prio(RTEMS_SELF, 4);

  /* Worker done (G) */
  barrier(ctx, &barrier_state);
}
942
943static void start_low_task(test_context *ctx, size_t i)
944{
945  rtems_status_code sc;
946
947  sc = rtems_task_create(
948    rtems_build_name('L', 'O', 'W', '0' + i),
949    5,
950    RTEMS_MINIMUM_STACK_SIZE,
951    RTEMS_DEFAULT_MODES,
952    RTEMS_DEFAULT_ATTRIBUTES,
953    &ctx->low_task_id[i]
954  );
955  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
956
957  sc = rtems_task_set_scheduler(ctx->low_task_id[i], ctx->scheduler_ids[i]);
958  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
959
960  sc = rtems_task_start(
961    ctx->low_task_id[i],
962    run_task,
963    (rtems_task_argument) &ctx->low_run[i]
964  );
965  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
966}
967
/*
 * Sets up the tasks, timer and MrsP semaphore for the block/unblock helping
 * protocol scenarios, then runs unblock_ready_owner() and
 * various_block_unblock().  The low priority tasks must never run; if they
 * do, the MrsP protocol was violated.
 */
static void test_mrsp_various_block_and_unblock(test_context *ctx)
{
  rtems_status_code sc;

  puts("test MrsP various block and unblock");

  change_prio(RTEMS_SELF, 4);

  reset_switch_events(ctx);

  ctx->low_run[0] = false;
  ctx->low_run[1] = false;
  ctx->high_run[0] = false;
  ctx->high_run[1] = false;

  sc = rtems_semaphore_create(
    rtems_build_name(' ', ' ', ' ', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    3,
    &ctx->mrsp_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  assert_prio(RTEMS_SELF, 4);

  /* High priority task on scheduler A (started later by the timer) */
  sc = rtems_task_create(
    rtems_build_name('H', 'I', 'G', '0'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->high_task_id[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* High priority task on scheduler B */
  sc = rtems_task_create(
    rtems_build_name('H', 'I', 'G', '1'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->high_task_id[1]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_scheduler(ctx->high_task_id[1], ctx->scheduler_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Worker (active rival) on scheduler B */
  sc = rtems_task_create(
    rtems_build_name('W', 'O', 'R', 'K'),
    4,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->worker_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_timer_create(
    rtems_build_name('T', 'I', 'M', 'R'),
    &ctx->timer_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* In case these tasks run, then we have a MrsP protocol violation */
  start_low_task(ctx, 0);
  start_low_task(ctx, 1);

  unblock_ready_owner(ctx);
  various_block_unblock(ctx);

  rtems_test_assert(!ctx->low_run[0]);
  rtems_test_assert(!ctx->low_run[1]);

  print_switch_events(ctx);

  /* Tear down all objects created for this scenario */
  sc = rtems_timer_delete(ctx->timer_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->high_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->high_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->worker_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->low_task_id[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->low_task_id[1]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
1073
/*
 * Verifies that a task sleeping while owning a MrsP semaphore does not let
 * the lower priority run task execute: the run flag is set only when we
 * sleep without the semaphore, and stays clear while we hold it.
 */
static void test_mrsp_obtain_and_sleep_and_release(test_context *ctx)
{
  rtems_status_code sc;
  rtems_id sem_id;
  rtems_id run_task_id;
  volatile bool run = false;

  puts("test MrsP obtain and sleep and release");

  change_prio(RTEMS_SELF, 1);

  reset_switch_events(ctx);

  /* Lower priority task on the same scheduler; sets "run" when it executes */
  sc = rtems_task_create(
    rtems_build_name(' ', 'R', 'U', 'N'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &run_task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(run_task_id, run_task, (rtems_task_argument) &run);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name('S', 'E', 'M', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    1,
    &sem_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(!run);

  /* Sleeping without the semaphore lets the run task execute */
  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(run);
  run = false;

  sc = rtems_semaphore_obtain(sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(!run);

  /* Sleeping while owning the semaphore must NOT let the run task execute */
  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(!run);

  sc = rtems_semaphore_release(sem_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  print_switch_events(ctx);

  sc = rtems_semaphore_delete(sem_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(run_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
1139
1140static void help_task(rtems_task_argument arg)
1141{
1142  test_context *ctx = &test_instance;
1143  rtems_status_code sc;
1144
1145  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
1146  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1147
1148  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
1149  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1150
1151  while (true) {
1152    /* Do nothing */
1153  }
1154}
1155
/*
 * Checks the MrsP helping protocol: while the main task owns the semaphore, a
 * task on another scheduler instance blocks on the same semaphore and thereby
 * offers its processor to the owner.  When the owner is preempted on its home
 * processor it must continue execution on the helping processor (asserted via
 * rtems_get_current_processor()).
 */
static void test_mrsp_obtain_and_release_with_help(test_context *ctx)
{
  rtems_status_code sc;
  rtems_id help_task_id;
  rtems_id run_task_id;
  volatile bool run = false;

  puts("test MrsP obtain and release with help");

  change_prio(RTEMS_SELF, 3);

  reset_switch_events(ctx);

  /* MrsP semaphore with ceiling priority 2 */
  sc = rtems_semaphore_create(
    rtems_build_name('S', 'E', 'M', 'A'),
    1,
    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
      | RTEMS_BINARY_SEMAPHORE,
    2,
    &ctx->mrsp_ids[0]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Owning the semaphore raises us to its ceiling priority */
  assert_prio(RTEMS_SELF, 2);

  /* The HELP task runs on scheduler instance 1 and blocks on the semaphore */
  sc = rtems_task_create(
    rtems_build_name('H', 'E', 'L', 'P'),
    3,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &help_task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_scheduler(
    help_task_id,
    ctx->scheduler_ids[1]
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(help_task_id, help_task, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Low priority (4) task used to preempt the main task on demand */
  sc = rtems_task_create(
    rtems_build_name(' ', 'R', 'U', 'N'),
    4,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &run_task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(run_task_id, run_task, (rtems_task_argument) &run);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Once the helper blocks on the semaphore it shows the ceiling priority */
  wait_for_prio(help_task_id, 2);

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* We still execute on our home processor, the RUN task did not run */
  rtems_test_assert(rtems_get_current_processor() == 0);
  rtems_test_assert(!run);

  /* Preempt the main task on processor 0 ... */
  change_prio(run_task_id, 1);

  /* ... which migrates us to the helping processor 1 */
  rtems_test_assert(rtems_get_current_processor() == 1);

  while (!run) {
    /* Wait */
  }

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(rtems_get_current_processor() == 1);

  /* Lowering the RUN task again must not move us back while we sleep */
  change_prio(run_task_id, 4);

  rtems_test_assert(rtems_get_current_processor() == 1);

  sc = rtems_task_wake_after(2);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(rtems_get_current_processor() == 1);

  /*
   * With this operation the scheduler instance 0 has now only the main and the
   * idle threads in the ready set.
   */
  sc = rtems_task_suspend(run_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  rtems_test_assert(rtems_get_current_processor() == 1);

  /* Exercise priority changes of the owner while it is being helped */
  change_prio(RTEMS_SELF, 1);
  change_prio(RTEMS_SELF, 3);

  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* After the release we are back on our home processor and priority */
  rtems_test_assert(rtems_get_current_processor() == 0);

  assert_prio(RTEMS_SELF, 3);

  /* The helper obtained and released the semaphore, back to its priority */
  wait_for_prio(help_task_id, 3);

  print_switch_events(ctx);

  /* Clean up */
  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(help_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(run_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
1278
/*
 * Steps a simple 32-bit linear congruential pseudo-random number generator
 * (Numerical Recipes constants a = 1664525, c = 1013904223) and returns the
 * next value of the sequence.
 */
static uint32_t simple_random(uint32_t v)
{
  return v * 1664525u + 1013904223u;
}
1286
1287static rtems_interval timeout(uint32_t v)
1288{
1289  return (v >> 23) % 4;
1290}
1291
/*
 * Load worker: until its stop flag is set it pseudo-randomly either sleeps
 * for one clock tick (to let lower priority tasks run) or obtains a nested
 * range of MrsP semaphores with random timeouts and releases them in reverse
 * order, updating the per-worker statistics counters.  Finally it signals
 * its termination via the counting semaphore and suspends itself.
 */
static void load_worker(rtems_task_argument index)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  uint32_t v = index;

  while (!ctx->stop_worker[index]) {
    /* Highest-index semaphore of this round's nested obtain sequence */
    uint32_t i = (v >> 13) % MRSP_COUNT;

    /* Base priority (workers are created with 3 + MRSP_COUNT + index and
       CPU_COUNT == MRSP_COUNT, see test_mrsp_load()) */
    assert_prio(RTEMS_SELF, 3 + CPU_COUNT + index);

    if ((v >> 7) % 1024 == 0) {
      /* Give some time to the lower priority tasks */

      ++ctx->counters[index].sleep;

      sc = rtems_task_wake_after(1);
      rtems_test_assert(sc == RTEMS_SUCCESSFUL);

      ++ctx->counters[index].cpu[rtems_get_current_processor()];
    } else {
      /* Nesting depth for this round (n <= i) */
      uint32_t n = (v >> 17) % (i + 1);
      uint32_t s;
      uint32_t t;

      /* Nested obtain */
      for (s = 0; s <= n; ++s) {
        uint32_t k = i - s;

        sc = rtems_semaphore_obtain(ctx->mrsp_ids[k], RTEMS_WAIT, timeout(v));
        if (sc == RTEMS_SUCCESSFUL) {
          ++ctx->counters[index].obtain[n];

          /* We execute with the ceiling priority of semaphore k */
          assert_prio(RTEMS_SELF, 3 + k);
        } else {
          rtems_test_assert(sc == RTEMS_TIMEOUT);

          ++ctx->counters[index].timeout;

          /* Stop nesting, release what was obtained so far */
          break;
        }

        ++ctx->counters[index].cpu[rtems_get_current_processor()];

        v = simple_random(v);
      }

      /* Release in reverse obtain order */
      for (t = 0; t < s; ++t) {
        uint32_t k = i + t - s + 1;

        sc = rtems_semaphore_release(ctx->mrsp_ids[k]);
        rtems_test_assert(sc == RTEMS_SUCCESSFUL);

        ++ctx->counters[index].cpu[rtems_get_current_processor()];
      }
    }

    v = simple_random(v);
  }

  /* Signal termination to the test driver */
  sc = rtems_semaphore_release(ctx->counting_sem_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Wait to be deleted by the test driver */
  rtems_task_suspend(RTEMS_SELF);
  rtems_test_assert(0);
}
1359
1360static void migration_task(rtems_task_argument arg)
1361{
1362  test_context *ctx = &test_instance;
1363  rtems_status_code sc;
1364  uint32_t cpu_count = rtems_get_processor_count();
1365  uint32_t v = 0xdeadbeef;
1366
1367  while (true) {
1368    uint32_t cpu_index = (v >> 5) % cpu_count;
1369
1370    sc = rtems_task_set_scheduler(RTEMS_SELF, ctx->scheduler_ids[cpu_index]);
1371    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
1372
1373    ++ctx->migration_counters[rtems_get_current_processor()];
1374
1375    v = simple_random(v);
1376  }
1377}
1378
/*
 * Stress test: starts one permanently migrating task plus two load workers
 * per processor and lets them hammer the MRSP_COUNT MrsP semaphores for 30
 * seconds.  Afterwards the workers are stopped and deleted, all semaphores
 * are destroyed, and the gathered per-worker and per-processor statistics
 * are printed.
 */
static void test_mrsp_load(test_context *ctx)
{
  rtems_status_code sc;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t index;

  puts("test MrsP load");

  change_prio(RTEMS_SELF, 2);

  /* Task which constantly migrates between the scheduler instances */
  sc = rtems_task_create(
    rtems_build_name('M', 'I', 'G', 'R'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &ctx->migration_task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_start(ctx->migration_task_id, migration_task, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Counting semaphore used by the workers to signal their termination */
  sc = rtems_semaphore_create(
    rtems_build_name('S', 'Y', 'N', 'C'),
    0,
    RTEMS_COUNTING_SEMAPHORE,
    0,
    &ctx->counting_sem_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* MrsP semaphores with ceiling priority 3 + index */
  for (index = 0; index < MRSP_COUNT; ++index) {
    sc = rtems_semaphore_create(
      'A' + index,
      1,
      RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
        | RTEMS_BINARY_SEMAPHORE,
      3 + index,
      &ctx->mrsp_ids[index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* Two load workers per processor, below all semaphore ceilings */
  for (index = 0; index < cpu_count; ++index) {
    uint32_t a = 2 * index;
    uint32_t b = a + 1;

    sc = rtems_task_create(
      'A' + a,
      3 + MRSP_COUNT + a,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->worker_ids[a]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(
      ctx->worker_ids[a],
      ctx->scheduler_ids[index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(
      ctx->worker_ids[a],
      load_worker,
      (rtems_task_argument) a
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_create(
      'A' + b,
      3 + MRSP_COUNT + b,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->worker_ids[b]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(
      ctx->worker_ids[b],
      ctx->scheduler_ids[index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(
      ctx->worker_ids[b],
      load_worker,
      (rtems_task_argument) b
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* Let the load run for 30 seconds */
  sc = rtems_task_wake_after(30 * rtems_clock_get_ticks_per_second());
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Request all workers to stop */
  for (index = 0; index < 2 * cpu_count; ++index) {
    ctx->stop_worker[index] = true;
  }

  /* Wait until every worker signaled its termination */
  for (index = 0; index < 2 * cpu_count; ++index) {
    sc = rtems_semaphore_obtain(
      ctx->counting_sem_id,
      RTEMS_WAIT,
      RTEMS_NO_TIMEOUT
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* Tear down the tasks and semaphores */
  for (index = 0; index < 2 * cpu_count; ++index) {
    sc = rtems_task_delete(ctx->worker_ids[index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  for (index = 0; index < MRSP_COUNT; ++index) {
    sc = rtems_semaphore_delete(ctx->mrsp_ids[index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_semaphore_delete(ctx->counting_sem_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(ctx->migration_task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Print the gathered statistics */
  for (index = 0; index < 2 * cpu_count; ++index) {
    uint32_t nest_level;
    uint32_t cpu_index;

    printf(
      "worker[%" PRIu32 "]\n"
        "  sleep = %" PRIu32 "\n"
        "  timeout = %" PRIu32 "\n",
      index,
      ctx->counters[index].sleep,
      ctx->counters[index].timeout
    );

    for (nest_level = 0; nest_level < MRSP_COUNT; ++nest_level) {
      printf(
        "  obtain[%" PRIu32 "] = %" PRIu32 "\n",
        nest_level,
        ctx->counters[index].obtain[nest_level]
      );
    }

    for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
      printf(
        "  cpu[%" PRIu32 "] = %" PRIu32 "\n",
        cpu_index,
        ctx->counters[index].cpu[cpu_index]
      );
    }
  }

  for (index = 0; index < cpu_count; ++index) {
    printf(
      "migrations[%" PRIu32 "] = %" PRIu32 "\n",
      index,
      ctx->migration_counters[index]
    );
  }
}
1544
/*
 * Test entry point: resolves the scheduler identifier for each processor,
 * runs all MrsP test cases in sequence and finally verifies that no
 * resources leaked.
 */
static void Init(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  rtems_resource_snapshot snapshot;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu_index;

  TEST_BEGIN();

  rtems_resource_snapshot_take(&snapshot);

  ctx->main_task_id = rtems_task_self();

  /* Processors 0 and 1 each have a dedicated scheduler instance */
  for (cpu_index = 0; cpu_index < 2; ++cpu_index) {
    sc = rtems_scheduler_ident(cpu_index, &ctx->scheduler_ids[cpu_index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /*
   * The remaining processors share scheduler instances pairwise (see the
   * CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS table; the scheduler name equals its
   * configuration index).
   */
  for (cpu_index = 2; cpu_index < cpu_count; ++cpu_index) {
    sc = rtems_scheduler_ident(
      cpu_index / 2 + 1,
      &ctx->scheduler_ids[cpu_index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  test_mrsp_flush_error();
  test_mrsp_initially_locked_error();
  test_mrsp_nested_obtain_error();
  test_mrsp_unlock_order_error();
  test_mrsp_deadlock_error(ctx);
  test_mrsp_multiple_obtain();
  test_mrsp_various_block_and_unblock(ctx);
  test_mrsp_obtain_and_sleep_and_release(ctx);
  test_mrsp_obtain_and_release_with_help(ctx);
  test_mrsp_obtain_and_release(ctx);
  test_mrsp_load(ctx);

  /* All tasks, semaphores, etc. created by the tests must be gone again */
  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));

  TEST_END();
  rtems_test_exit(0);
}
1589
/* Application configuration */

#define CONFIGURE_SMP_APPLICATION

/* One millisecond clock tick */
#define CONFIGURE_MICROSECONDS_PER_TICK 1000

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER

/* Two load workers per processor plus the migration task and the Init task */
#define CONFIGURE_MAXIMUM_TASKS (2 * CPU_COUNT + 2)

/* The MrsP semaphores plus the counting synchronization semaphore */
#define CONFIGURE_MAXIMUM_SEMAPHORES (MRSP_COUNT + 1)
#define CONFIGURE_MAXIMUM_MRSP_SEMAPHORES MRSP_COUNT
#define CONFIGURE_MAXIMUM_TIMERS 1

#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT

#define CONFIGURE_SCHEDULER_SIMPLE_SMP

#include <rtems/scheduler.h>

/*
 * Seventeen scheduler instances: instances 0 and 1 serve one processor each,
 * instances 2 to 16 may serve up to two processors each, covering all
 * CPU_COUNT (32) configured processors.
 */
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(0);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(1);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(2);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(3);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(4);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(5);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(6);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(7);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(8);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(9);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(10);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(11);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(12);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(13);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(14);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(15);
RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(16);

/* The scheduler name equals the configuration index */
#define CONFIGURE_SCHEDULER_CONTROLS \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(0, 0), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(1, 1), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(2, 2), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(3, 3), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(4, 4), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(5, 5), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(6, 6), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(7, 7), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(8, 8), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(9, 9), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(10, 10), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(11, 11), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(12, 12), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(13, 13), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(14, 14), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(15, 15), \
  RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(16, 16)

/*
 * Processors 0 and 1 are mandatory for their dedicated scheduler instances;
 * the remaining processors are optional and assigned pairwise to the
 * instances 2 to 16.
 */
#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
  RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
  RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
  RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)

/* Record context switches for the switch event checks */
#define CONFIGURE_INITIAL_EXTENSIONS \
  { .thread_switch = switch_extension }, \
  RTEMS_TEST_INITIAL_EXTENSION

#define CONFIGURE_INIT_TASK_NAME rtems_build_name('M', 'A', 'I', 'N')
#define CONFIGURE_INIT_TASK_PRIORITY 2

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE

#define CONFIGURE_INIT

#include <rtems/confdefs.h>
Note: See TracBrowser for help on using the repository browser.