Changeset d79df38 in rtems


Ignore:
Timestamp:
Jul 26, 2016, 8:34:21 AM (3 years ago)
Author:
Sebastian Huber <sebastian.huber@…>
Branches:
master
Children:
b1ef3674
Parents:
1fcac5a
git-author:
Sebastian Huber <sebastian.huber@…> (07/26/16 08:34:21)
git-committer:
Sebastian Huber <sebastian.huber@…> (07/27/16 08:55:30)
Message:

score: Add deadlock detection

The mutex objects use the owner field of the thread queues for the mutex
owner. Use this and add deadlock detection to
_Thread_queue_Enqueue_critical() for thread queues with an owner.

Update #2412.
Update #2556.
Close #2765.

Files:
13 edited

Legend:

Unmodified
Added
Removed
  • cpukit/sapi/src/interrtext.c

    r1fcac5a rd79df38  
    88
    99/*
    10  * Copyright (c) 2012-2015 embedded brains GmbH.  All rights reserved.
     10 * Copyright (c) 2012, 2016 embedded brains GmbH.  All rights reserved.
    1111 *
    1212 *  embedded brains GmbH
     
    5555  "INTERNAL_ERROR_RESOURCE_IN_USE",
    5656  "INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL",
    57   "INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL"
     57  "INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL",
     58  "INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK"
    5859};
    5960
  • cpukit/score/include/rtems/score/interr.h

    r1fcac5a rd79df38  
    164164  INTERNAL_ERROR_RESOURCE_IN_USE,
    165165  INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL,
    166   INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL
     166  INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL,
     167  INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
    167168} Internal_errors_Core_list;
    168169
  • cpukit/score/include/rtems/score/thread.h

    r1fcac5a rd79df38  
    328328    Chain_Control Pending_requests;
    329329  } Lock;
     330
     331  /**
     332   * @brief Thread queue link provided for use by the thread wait lock owner to
     333   * build a thread queue path.
     334   */
     335  Thread_queue_Link Link;
    330336#endif
    331337
  • cpukit/score/include/rtems/score/threadq.h

    r1fcac5a rd79df38  
    5050typedef struct Thread_queue_Path Thread_queue_Path;
    5151
     52/**
     53 * @brief Thread queue deadlock callout.
     54 *
     55 * @param the_thread The thread that detected the deadlock.
     56 *
     57 * @see _Thread_queue_Context_set_deadlock_callout().
     58 */
     59typedef void ( *Thread_queue_Deadlock_callout )(
     60  Thread_Control *the_thread
     61);
     62
    5263#if defined(RTEMS_MULTIPROCESSING)
    5364/**
     
    118129
    119130  /**
     131   * @brief Invoked in case of a detected deadlock.
     132   *
     133   * Must be initialized for _Thread_queue_Enqueue_critical() in case the
     134   * thread queue may have an owner, e.g. for mutex objects.
     135   *
     136   * @see _Thread_queue_Context_set_deadlock_callout().
     137   */
     138  Thread_queue_Deadlock_callout deadlock_callout;
     139
     140#if defined(RTEMS_MULTIPROCESSING)
     141  /**
    120142   * @brief Callout to unblock the thread in case it is actually a thread
    121143   * proxy.
     
    127149   * @see _Thread_queue_Context_set_MP_callout().
    128150   */
    129 #if defined(RTEMS_MULTIPROCESSING)
    130151  Thread_queue_MP_callout mp_callout;
    131152#endif
     
    176197typedef struct {
    177198  /**
     199   * @brief Node to register this link in the global thread queue links lookup
     200   * tree.
     201   */
     202  RBTree_Node Registry_node;
     203
     204  /**
     205   * @brief The source thread queue determined by the thread queue owner.
     206   */
     207  Thread_queue_Queue *source;
     208
     209  /**
     210   * @brief The target thread queue determined by the thread wait queue of the
     211   * source owner.
     212   */
     213  Thread_queue_Queue *target;
     214
     215  /**
     216   * @brief Node to add this link to a thread queue path.
     217   */
     218  Chain_Node Path_node;
     219
     220  /**
    178221   * @brief The owner of this thread queue link.
    179222   */
  • cpukit/score/include/rtems/score/threadqimpl.h

    r1fcac5a rd79df38  
    5252struct Thread_queue_Path {
    5353#if defined(RTEMS_SMP)
     54  /**
     55   * @brief The chain of thread queue links defining the thread queue path.
     56   */
     57  Chain_Control Links;
     58
    5459  /**
    5560   * @brief The start of a thread queue path.
     
    8792
    8893/**
     94 * @brief Sets the thread wait return code to STATUS_DEADLOCK.
     95 */
     96void _Thread_queue_Deadlock_status( Thread_Control *the_thread );
     97
     98/**
     99 * @brief Results in an INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK fatal error.
     100 */
     101void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread );
     102
     103/**
    89104 * @brief Initializes a thread queue context.
    90105 *
     
    98113  memset( queue_context, 0, sizeof( *queue_context ) );
    99114  queue_context->expected_thread_dispatch_disable_level = 0xdeadbeef;
     115  queue_context->deadlock_callout = _Thread_queue_Deadlock_fatal;
    100116#else
    101117  (void) queue_context;
     
    171187  queue_context->timeout_discipline = WATCHDOG_ABSOLUTE;
    172188  queue_context->timeout = timeout;
     189}
     190
     191/**
     192 * @brief Sets the deadlock callout in the thread queue
     193 * context.
     194 *
     195 * A deadlock callout must be provided for _Thread_queue_Enqueue_critical()
     196 * operations that operate on thread queues which may have an owner, e.g. mutex
     197 * objects.  Available deadlock callouts are _Thread_queue_Deadlock_status()
     198 * and _Thread_queue_Deadlock_fatal().
     199 *
     200 * @param queue_context The thread queue context.
     201 * @param deadlock_callout The deadlock callout.
     202 *
     203 * @see _Thread_queue_Enqueue_critical().
     204 */
     205RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_deadlock_callout(
     206  Thread_queue_Context          *queue_context,
     207  Thread_queue_Deadlock_callout  deadlock_callout
     208)
     209{
     210  queue_context->deadlock_callout = deadlock_callout;
    173211}
    174212
  • cpukit/score/src/coremutexseize.c

    r1fcac5a rd79df38  
    6363#endif
    6464
     65  _Thread_queue_Context_set_deadlock_callout(
     66    queue_context,
     67    _Thread_queue_Deadlock_status
     68  );
     69
    6570  _Thread_queue_Enqueue_critical(
    6671    &the_mutex->Wait_queue.Queue,
     
    8893  if ( wait ) {
    8994    _Thread_queue_Context_set_expected_level( queue_context, 1 );
     95    _Thread_queue_Context_set_deadlock_callout(
     96      queue_context,
     97      _Thread_queue_Deadlock_status
     98    );
    9099    _Thread_queue_Enqueue_critical(
    91100      &the_mutex->Wait_queue.Queue,
  • cpukit/score/src/mutex.c

    r1fcac5a rd79df38  
    109109{
    110110  _Thread_queue_Context_set_expected_level( queue_context, 1 );
     111  _Thread_queue_Context_set_deadlock_callout(
     112    queue_context,
     113    _Thread_queue_Deadlock_fatal
     114  );
    111115  _Thread_queue_Enqueue_critical(
    112116    &mutex->Queue.Queue,
  • cpukit/score/src/threadqenqueue.c

    r1fcac5a rd79df38  
    1010 *  On-Line Applications Research Corporation (OAR).
    1111 *
     12 *  Copyright (c) 2015, 2016 embedded brains GmbH.
     13 *
    1214 *  The license and distribution terms for this file may be
    1315 *  found in the file LICENSE in this distribution or at
     
    3537  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN)
    3638
     39#if defined(RTEMS_SMP)
     40/*
     41 * A global registry of active thread queue links is used to provide deadlock
     42 * detection on SMP configurations.  This is simple to implement and no
     43 * additional storage is required for the thread queues.  The disadvantage is
     44 * the global registry is not scalable and may lead to lock contention.
     45 * However, the registry is only used in case of nested resource conflicts.  In
     46 * this case, the application is already in trouble.
     47 */
     48
     49typedef struct {
     50  ISR_lock_Control Lock;
     51
     52  RBTree_Control Links;
     53} Thread_queue_Links;
     54
     55static Thread_queue_Links _Thread_queue_Links = {
     56  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
     57  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
     58};
     59
     60static bool _Thread_queue_Link_equal(
     61  const void        *left,
     62  const RBTree_Node *right
     63)
     64{
     65  const Thread_queue_Queue *the_left;
     66  const Thread_queue_Link  *the_right;
     67
     68  the_left = left;
     69  the_right = (Thread_queue_Link *) right;
     70
     71  return the_left == the_right->source;
     72}
     73
     74static bool _Thread_queue_Link_less(
     75  const void        *left,
     76  const RBTree_Node *right
     77)
     78{
     79  const Thread_queue_Queue *the_left;
     80  const Thread_queue_Link  *the_right;
     81
     82  the_left = left;
     83  the_right = (Thread_queue_Link *) right;
     84
     85  return (uintptr_t) the_left < (uintptr_t) the_right->source;
     86}
     87
     88static void *_Thread_queue_Link_map( RBTree_Node *node )
     89{
     90  return node;
     91}
     92
     93static Thread_queue_Link *_Thread_queue_Link_find(
     94  Thread_queue_Links *links,
     95  Thread_queue_Queue *source
     96)
     97{
     98  return _RBTree_Find_inline(
     99    &links->Links,
     100    source,
     101    _Thread_queue_Link_equal,
     102    _Thread_queue_Link_less,
     103    _Thread_queue_Link_map
     104  );
     105}
     106
     107static bool _Thread_queue_Link_add(
     108  Thread_queue_Link  *link,
     109  Thread_queue_Queue *source,
     110  Thread_queue_Queue *target
     111)
     112{
     113  Thread_queue_Links *links;
     114  Thread_queue_Queue *recursive_target;
     115  ISR_lock_Context    lock_context;
     116
     117  links = &_Thread_queue_Links;
     118  recursive_target = target;
     119
     120  _ISR_lock_Acquire( &links->Lock, &lock_context );
     121
     122  while ( true ) {
     123    Thread_queue_Link *recursive_link;
     124
     125    recursive_link = _Thread_queue_Link_find( links, recursive_target );
     126
     127    if ( recursive_link == NULL ) {
     128      break;
     129    }
     130
     131    recursive_target = recursive_link->target;
     132
     133    if ( recursive_target == source ) {
     134      _ISR_lock_Release( &links->Lock, &lock_context );
     135      return false;
     136    }
     137  }
     138
     139  link->source = source;
     140  link->target = target;
     141  _RBTree_Insert_inline(
     142    &links->Links,
     143    &link->Registry_node,
     144    source,
     145    _Thread_queue_Link_less
     146  );
     147
     148  _ISR_lock_Release( &links->Lock, &lock_context );
     149  return true;
     150}
     151
     152static void _Thread_queue_Link_remove( Thread_queue_Link *link )
     153{
     154  Thread_queue_Links *links;
     155  ISR_lock_Context    lock_context;
     156
     157  links = &_Thread_queue_Links;
     158
     159  _ISR_lock_Acquire( &links->Lock, &lock_context );
     160  _RBTree_Extract( &links->Links, &link->Registry_node );
     161  _ISR_lock_Release( &links->Lock, &lock_context );
     162}
     163#endif
     164
    37165static void _Thread_queue_Path_release( Thread_queue_Path *path )
    38166{
    39167#if defined(RTEMS_SMP)
    40   Thread_queue_Link *link;
    41 
    42   link = &path->Start;
    43 
    44   if ( link->owner != NULL ) {
     168  Chain_Node *head;
     169  Chain_Node *node;
     170
     171  head = _Chain_Head( &path->Links );
     172  node = _Chain_Last( &path->Links );
     173
     174  while ( head != node ) {
     175    Thread_queue_Link *link;
     176
     177    link = RTEMS_CONTAINER_OF( node, Thread_queue_Link, Path_node );
     178
     179    if ( link->Queue_context.Wait.queue_lock != NULL ) {
     180      _Thread_queue_Link_remove( link );
     181    }
     182
    45183    _Thread_Wait_release_critical( link->owner, &link->Queue_context );
     184
     185    node = _Chain_Previous( node );
     186#if defined(RTEMS_DEBUG)
     187    _Chain_Set_off_chain( &link->Path_node );
     188#endif
    46189  }
    47190#else
     
    50193}
    51194
    52 static void _Thread_queue_Path_acquire(
     195static bool _Thread_queue_Path_acquire(
    53196  Thread_Control     *the_thread,
    54197  Thread_queue_Queue *queue,
     
    56199)
    57200{
     201  Thread_Control     *owner;
     202
    58203#if defined(RTEMS_SMP)
    59   Thread_Control     *owner;
    60204  Thread_queue_Link  *link;
     205  Thread_queue_Queue *target;
     206
     207  /*
     208   * For an overview please look at the non-SMP part below.  We basically do
     209   * the same on SMP configurations.  The fact that we may have more than one
     210   * executing thread and each thread queue has its own SMP lock makes the task
     211   * a bit more difficult.  We have to avoid deadlocks at SMP lock level, since
     212   * this would result in an unrecoverable deadlock of the overall system.
     213   */
     214
     215  _Chain_Initialize_empty( &path->Links );
     216  _Chain_Initialize_node( &path->Start.Path_node );
     217  _Thread_queue_Context_initialize( &path->Start.Queue_context );
    61218
    62219  owner = queue->owner;
    63220
    64221  if ( owner == NULL ) {
    65     return;
     222    return true;
     223  }
     224
     225  if ( owner == the_thread ) {
     226    return false;
    66227  }
    67228
    68229  link = &path->Start;
    69   link->owner = owner;
    70 
    71   _Thread_Wait_acquire_default_critical(
    72     owner,
    73     &link->Queue_context.Lock_context
    74   );
     230
     231  do {
     232    _Chain_Append_unprotected( &path->Links, &link->Path_node );
     233    link->owner = owner;
     234
     235    _Thread_Wait_acquire_default_critical(
     236      owner,
     237      &link->Queue_context.Lock_context
     238    );
     239
     240    target = owner->Wait.queue;
     241    link->Queue_context.Wait.queue = target;
     242    link->Queue_context.Wait.operations = owner->Wait.operations;
     243
     244    if ( target != NULL ) {
     245      if ( _Thread_queue_Link_add( link, queue, target ) ) {
     246        link->Queue_context.Wait.queue_lock = &target->Lock;
     247        _Chain_Append_unprotected(
     248          &owner->Wait.Lock.Pending_requests,
     249          &link->Queue_context.Wait.Gate.Node
     250        );
     251        _Thread_Wait_release_default_critical(
     252          owner,
     253          &link->Queue_context.Lock_context
     254        );
     255        _Thread_Wait_acquire_queue_critical(
     256          &target->Lock,
     257          &link->Queue_context
     258        );
     259
     260        if ( link->Queue_context.Wait.queue == NULL ) {
     261          return true;
     262        }
     263      } else {
     264        link->Queue_context.Wait.queue_lock = NULL;
     265        _Thread_queue_Path_release( path );
     266        return false;
     267      }
     268    } else {
     269      link->Queue_context.Wait.queue_lock = NULL;
     270      return true;
     271    }
     272
     273    link = &owner->Wait.Link;
     274    queue = target;
     275    owner = queue->owner;
     276  } while ( owner != NULL );
    75277#else
    76   (void) the_thread;
    77   (void) queue;
    78   (void) path;
    79 #endif
     278  do {
     279    owner = queue->owner;
     280
     281    if ( owner == NULL ) {
     282      return true;
     283    }
     284
     285    if ( owner == the_thread ) {
     286      return false;
     287    }
     288
     289    queue = owner->Wait.queue;
     290  } while ( queue != NULL );
     291#endif
     292
     293  return true;
     294}
     295
     296void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
     297{
     298  the_thread->Wait.return_code = STATUS_DEADLOCK;
     299}
     300
     301void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
     302{
     303  _Terminate(
     304    INTERNAL_ERROR_CORE,
     305    false,
     306    INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
     307  );
    80308}
    81309
     
    100328  _Thread_Wait_claim( the_thread, queue, operations );
    101329
    102   _Thread_queue_Path_acquire( the_thread, queue, &path );
     330  if ( !_Thread_queue_Path_acquire( the_thread, queue, &path ) ) {
     331    _Thread_Wait_restore_default( the_thread );
     332    _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
     333    ( *queue_context->deadlock_callout )( the_thread );
     334    return;
     335  }
     336
    103337  ( *operations->enqueue )( queue, the_thread, &path );
     338
    104339  _Thread_queue_Path_release( &path );
    105340
  • testsuites/sptests/spinternalerror02/init.c

    r1fcac5a rd79df38  
    11/*
    2  * Copyright (c) 2012-2015 embedded brains GmbH.  All rights reserved.
     2 * Copyright (c) 2012, 2016 embedded brains GmbH.  All rights reserved.
    33 *
    44 *  embedded brains GmbH
     
    3737
    3838  rtems_test_assert(
    39     error - 3 == INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL
     39    error - 3 == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
    4040  );
    4141}
  • testsuites/sptests/spinternalerror02/spinternalerror02.scn

    r1fcac5a rd79df38  
    1818INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY
    1919OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL
    20 INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE
     20INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
    2121INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0
    2222OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP
     
    2828INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL
    2929INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL
     30INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
    3031?
    3132?
  • testsuites/sptests/spmutex01/init.c

    r1fcac5a rd79df38  
    1717#endif
    1818
     19#include <threads.h>
     20#include <setjmp.h>
     21
     22#include <rtems.h>
     23#include <rtems/libcsupport.h>
     24
     25#ifdef RTEMS_POSIX_API
     26#include <errno.h>
     27#include <pthread.h>
     28#endif
     29
    1930#include "tmacros.h"
    2031
     
    2233
    2334#define TASK_COUNT 5
     35
     36#define MTX_COUNT 3
    2437
    2538typedef enum {
    2639  REQ_WAKE_UP_MASTER = RTEMS_EVENT_0,
    2740  REQ_WAKE_UP_HELPER = RTEMS_EVENT_1,
    28   REQ_MTX_OBTAIN = RTEMS_EVENT_2,
    29   REQ_MTX_RELEASE = RTEMS_EVENT_3
     41  REQ_MTX_0_OBTAIN = RTEMS_EVENT_2,
     42  REQ_MTX_0_RELEASE = RTEMS_EVENT_3,
     43  REQ_MTX_1_OBTAIN = RTEMS_EVENT_4,
     44  REQ_MTX_1_RELEASE = RTEMS_EVENT_5,
     45  REQ_MTX_2_OBTAIN = RTEMS_EVENT_6,
     46  REQ_MTX_2_RELEASE = RTEMS_EVENT_7,
     47  REQ_MTX_C11_OBTAIN = RTEMS_EVENT_8,
     48  REQ_MTX_C11_RELEASE = RTEMS_EVENT_9,
     49  REQ_MTX_POSIX_OBTAIN = RTEMS_EVENT_10,
     50  REQ_MTX_POSIX_RELEASE = RTEMS_EVENT_11
    3051} request_id;
    3152
    3253typedef enum {
     54  M,
    3355  A_1,
    3456  A_2_0,
    3557  A_2_1,
    36   M,
    3758  H,
    3859  NONE
    3960} task_id;
    4061
     62typedef enum {
     63  MTX_0,
     64  MTX_1,
     65  MTX_2
     66} mutex_id;
     67
    4168typedef struct {
    42   rtems_id mtx;
     69  rtems_id mtx[MTX_COUNT];
     70  mtx_t mtx_c11;
     71#ifdef RTEMS_POSIX_API
     72  pthread_mutex_t mtx_posix;
     73#endif
    4374  rtems_id tasks[TASK_COUNT];
    4475  int generation[TASK_COUNT];
    4576  int expected_generation[TASK_COUNT];
     77  jmp_buf deadlock_return_context;
    4678} test_context;
    4779
     
    110142}
    111143
    112 static void obtain(test_context *ctx)
    113 {
    114   rtems_status_code sc;
    115 
    116   sc = rtems_semaphore_obtain(ctx->mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
     144static void obtain(test_context *ctx, mutex_id id)
     145{
     146  rtems_status_code sc;
     147
     148  sc = rtems_semaphore_obtain(ctx->mtx[id], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    117149  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    118150}
    119151
    120 static void release(test_context *ctx)
    121 {
    122   rtems_status_code sc;
    123 
    124   sc = rtems_semaphore_release(ctx->mtx);
     152static void deadlock_obtain(test_context *ctx, mutex_id id)
     153{
     154  rtems_status_code sc;
     155
     156  sc = rtems_semaphore_obtain(ctx->mtx[id], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
     157  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
     158}
     159
     160static void release(test_context *ctx, mutex_id id)
     161{
     162  rtems_status_code sc;
     163
     164  sc = rtems_semaphore_release(ctx->mtx[id]);
    125165  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    126166}
     167
     168static void obtain_c11(test_context *ctx)
     169{
     170  int status;
     171
     172  status = mtx_lock(&ctx->mtx_c11);
     173  rtems_test_assert(status == thrd_success);
     174}
     175
     176static void deadlock_obtain_c11(test_context *ctx)
     177{
     178  if (setjmp(ctx->deadlock_return_context) == 0) {
     179    (void) mtx_lock(&ctx->mtx_c11);
     180  }
     181}
     182
     183static void release_c11(test_context *ctx)
     184{
     185  int status;
     186
     187  status = mtx_unlock(&ctx->mtx_c11);
     188  rtems_test_assert(status == thrd_success);
     189}
     190
     191#ifdef RTEMS_POSIX_API
     192static void obtain_posix(test_context *ctx)
     193{
     194  int error;
     195
     196  error = pthread_mutex_lock(&ctx->mtx_posix);
     197  rtems_test_assert(error == 0);
     198}
     199
     200static void deadlock_obtain_posix(test_context *ctx)
     201{
     202  int error;
     203
     204  error = pthread_mutex_lock(&ctx->mtx_posix);
     205  rtems_test_assert(error == EDEADLK);
     206}
     207
     208static void release_posix(test_context *ctx)
     209{
     210  int error;
     211
     212  error = pthread_mutex_unlock(&ctx->mtx_posix);
     213  rtems_test_assert(error == 0);
     214}
     215#endif
    127216
    128217static void check_generations(test_context *ctx, task_id a, task_id b)
     
    180269    rtems_event_set events = wait_for_events();
    181270
    182     if ((events & REQ_MTX_OBTAIN) != 0) {
    183       obtain(ctx);
    184       ++ctx->generation[id];
    185     }
    186 
    187     if ((events & REQ_MTX_RELEASE) != 0) {
    188       release(ctx);
    189       ++ctx->generation[id];
    190     }
    191   }
    192 }
    193 
    194 static void test(void)
    195 {
    196   test_context *ctx = &test_instance;
    197   rtems_status_code sc;
     271    if ((events & REQ_MTX_0_OBTAIN) != 0) {
     272      obtain(ctx, MTX_0);
     273      ++ctx->generation[id];
     274    }
     275
     276    if ((events & REQ_MTX_0_RELEASE) != 0) {
     277      release(ctx, MTX_0);
     278      ++ctx->generation[id];
     279    }
     280
     281    if ((events & REQ_MTX_1_OBTAIN) != 0) {
     282      obtain(ctx, MTX_1);
     283      ++ctx->generation[id];
     284    }
     285
     286    if ((events & REQ_MTX_1_RELEASE) != 0) {
     287      release(ctx, MTX_1);
     288      ++ctx->generation[id];
     289    }
     290
     291    if ((events & REQ_MTX_2_OBTAIN) != 0) {
     292      obtain(ctx, MTX_2);
     293      ++ctx->generation[id];
     294    }
     295
     296    if ((events & REQ_MTX_2_RELEASE) != 0) {
     297      release(ctx, MTX_2);
     298      ++ctx->generation[id];
     299    }
     300
     301    if ((events & REQ_MTX_C11_OBTAIN) != 0) {
     302      obtain_c11(ctx);
     303      ++ctx->generation[id];
     304    }
     305
     306    if ((events & REQ_MTX_C11_RELEASE) != 0) {
     307      release_c11(ctx);
     308      ++ctx->generation[id];
     309    }
     310
     311#ifdef RTEMS_POSIX_API
     312    if ((events & REQ_MTX_POSIX_OBTAIN) != 0) {
     313      obtain_posix(ctx);
     314      ++ctx->generation[id];
     315    }
     316
     317    if ((events & REQ_MTX_POSIX_RELEASE) != 0) {
     318      release_posix(ctx);
     319      ++ctx->generation[id];
     320    }
     321#endif
     322  }
     323}
     324
     325static void set_up(test_context *ctx)
     326{
     327  rtems_status_code sc;
     328  int status;
     329  size_t i;
    198330
    199331  ctx->tasks[M] = rtems_task_self();
     
    203335  start_task(ctx, H, helper, 3);
    204336
    205   sc = rtems_semaphore_create(
    206     rtems_build_name(' ', 'M', 'T', 'X'),
    207     1,
    208     RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    209     0,
    210     &ctx->mtx
    211   );
    212   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    213 
    214   obtain(ctx);
    215   request(ctx, A_1, REQ_MTX_OBTAIN);
     337  for (i = 0; i < MTX_COUNT; ++i) {
     338    sc = rtems_semaphore_create(
     339      rtems_build_name(' ', 'M', 'T', 'X'),
     340      1,
     341      RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
     342      0,
     343      &ctx->mtx[i]
     344    );
     345    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     346  }
     347
     348  status = mtx_init(&ctx->mtx_c11, mtx_plain);
     349  rtems_test_assert(status == thrd_success);
     350
     351#ifdef RTEMS_POSIX_API
     352  {
     353    int error;
     354    pthread_mutexattr_t attr;
     355
     356    error = pthread_mutexattr_init(&attr);
     357    rtems_test_assert(error == 0);
     358
     359    error = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
     360    rtems_test_assert(error == 0);
     361
     362    error = pthread_mutex_init(&ctx->mtx_posix, &attr);
     363    rtems_test_assert(error == 0);
     364
     365    error = pthread_mutexattr_destroy(&attr);
     366    rtems_test_assert(error == 0);
     367  }
     368#endif
     369}
     370
     371static void test_inherit(test_context *ctx)
     372{
     373  obtain(ctx, MTX_0);
     374  request(ctx, A_1, REQ_MTX_0_OBTAIN);
    216375  check_generations(ctx, NONE, NONE);
    217376  assert_prio(ctx, M, 1);
    218   release(ctx);
     377  release(ctx, MTX_0);
    219378  check_generations(ctx, A_1, NONE);
    220379  assert_prio(ctx, M, 3);
    221   request(ctx, A_1, REQ_MTX_RELEASE);
    222   check_generations(ctx, A_1, NONE);
    223 
    224   obtain(ctx);
    225   request(ctx, A_2_0, REQ_MTX_OBTAIN);
    226   request(ctx, A_1, REQ_MTX_OBTAIN);
    227   request(ctx, A_2_1, REQ_MTX_OBTAIN);
     380  request(ctx, A_1, REQ_MTX_0_RELEASE);
     381  check_generations(ctx, A_1, NONE);
     382}
     383
     384static void test_inherit_fifo_for_equal_priority(test_context *ctx)
     385{
     386  obtain(ctx, MTX_0);
     387  request(ctx, A_2_0, REQ_MTX_0_OBTAIN);
     388  request(ctx, A_1, REQ_MTX_0_OBTAIN);
     389  request(ctx, A_2_1, REQ_MTX_0_OBTAIN);
    228390  check_generations(ctx, NONE, NONE);
    229391  assert_prio(ctx, M, 1);
    230   release(ctx);
     392  release(ctx, MTX_0);
    231393  check_generations(ctx, A_1, NONE);
    232394  assert_prio(ctx, M, 3);
    233395  assert_prio(ctx, A_1, 1);
    234   request(ctx, A_1, REQ_MTX_RELEASE);
     396  request(ctx, A_1, REQ_MTX_0_RELEASE);
    235397  check_generations(ctx, A_1, A_2_0);
    236   request(ctx, A_2_0, REQ_MTX_RELEASE);
     398  request(ctx, A_2_0, REQ_MTX_0_RELEASE);
    237399  check_generations(ctx, A_2_0, A_2_1);
    238   request(ctx, A_2_1, REQ_MTX_RELEASE);
     400  request(ctx, A_2_1, REQ_MTX_0_RELEASE);
    239401  check_generations(ctx, A_2_1, NONE);
    240402}
    241403
     404static void test_deadlock_two_classic(test_context *ctx)
     405{
     406  obtain(ctx, MTX_0);
     407  request(ctx, A_1, REQ_MTX_1_OBTAIN);
     408  check_generations(ctx, A_1, NONE);
     409  request(ctx, A_1, REQ_MTX_0_OBTAIN);
     410  check_generations(ctx, NONE, NONE);
     411  deadlock_obtain(ctx, MTX_1);
     412  release(ctx, MTX_0);
     413  check_generations(ctx, A_1, NONE);
     414  request(ctx, A_1, REQ_MTX_0_RELEASE);
     415  check_generations(ctx, A_1, NONE);
     416  request(ctx, A_1, REQ_MTX_1_RELEASE);
     417  check_generations(ctx, A_1, NONE);
     418}
     419
     420static void test_deadlock_three_classic(test_context *ctx)
     421{
     422  obtain(ctx, MTX_0);
     423  request(ctx, A_1, REQ_MTX_1_OBTAIN);
     424  check_generations(ctx, A_1, NONE);
     425  request(ctx, A_2_0, REQ_MTX_2_OBTAIN);
     426  check_generations(ctx, A_2_0, NONE);
     427  request(ctx, A_2_0, REQ_MTX_1_OBTAIN);
     428  check_generations(ctx, NONE, NONE);
     429  request(ctx, A_1, REQ_MTX_0_OBTAIN);
     430  check_generations(ctx, NONE, NONE);
     431  deadlock_obtain(ctx, MTX_2);
     432  release(ctx, MTX_0);
     433  check_generations(ctx, A_1, NONE);
     434  request(ctx, A_1, REQ_MTX_0_RELEASE);
     435  check_generations(ctx, A_1, NONE);
     436  request(ctx, A_1, REQ_MTX_1_RELEASE);
     437  check_generations(ctx, A_1, A_2_0);
     438  request(ctx, A_2_0, REQ_MTX_2_RELEASE);
     439  check_generations(ctx, A_2_0, NONE);
     440  request(ctx, A_2_0, REQ_MTX_1_RELEASE);
     441  check_generations(ctx, A_2_0, NONE);
     442}
     443
     444static void test_deadlock_c11_and_classic(test_context *ctx)
     445{
     446  obtain_c11(ctx);
     447  request(ctx, A_1, REQ_MTX_0_OBTAIN);
     448  check_generations(ctx, A_1, NONE);
     449  request(ctx, A_1, REQ_MTX_C11_OBTAIN);
     450  check_generations(ctx, NONE, NONE);
     451  deadlock_obtain(ctx, MTX_0);
     452  release_c11(ctx);
     453  check_generations(ctx, A_1, NONE);
     454  request(ctx, A_1, REQ_MTX_C11_RELEASE);
     455  check_generations(ctx, A_1, NONE);
     456  request(ctx, A_1, REQ_MTX_0_RELEASE);
     457  check_generations(ctx, A_1, NONE);
     458}
     459
     460static void test_deadlock_classic_and_c11(test_context *ctx)
     461{
     462  obtain(ctx, MTX_0);
     463  request(ctx, A_1, REQ_MTX_C11_OBTAIN);
     464  check_generations(ctx, A_1, NONE);
     465  request(ctx, A_1, REQ_MTX_0_OBTAIN);
     466  check_generations(ctx, NONE, NONE);
     467  deadlock_obtain_c11(ctx);
     468  release(ctx, MTX_0);
     469  check_generations(ctx, A_1, NONE);
     470  request(ctx, A_1, REQ_MTX_0_RELEASE);
     471  check_generations(ctx, A_1, NONE);
     472  request(ctx, A_1, REQ_MTX_C11_RELEASE);
     473  check_generations(ctx, A_1, NONE);
     474}
     475
     476static void test_deadlock_posix_and_classic(test_context *ctx)
     477{
     478#ifdef RTEMS_POSIX_API
     479  obtain_posix(ctx);
     480  request(ctx, A_1, REQ_MTX_0_OBTAIN);
     481  check_generations(ctx, A_1, NONE);
     482  request(ctx, A_1, REQ_MTX_POSIX_OBTAIN);
     483  check_generations(ctx, NONE, NONE);
     484  deadlock_obtain(ctx, MTX_0);
     485  release_posix(ctx);
     486  check_generations(ctx, A_1, NONE);
     487  request(ctx, A_1, REQ_MTX_POSIX_RELEASE);
     488  check_generations(ctx, A_1, NONE);
     489  request(ctx, A_1, REQ_MTX_0_RELEASE);
     490  check_generations(ctx, A_1, NONE);
     491#endif
     492}
     493
     494static void test_deadlock_classic_and_posix(test_context *ctx)
     495{
     496#ifdef RTEMS_POSIX_API
     497  obtain(ctx, MTX_0);
     498  request(ctx, A_1, REQ_MTX_POSIX_OBTAIN);
     499  check_generations(ctx, A_1, NONE);
     500  request(ctx, A_1, REQ_MTX_0_OBTAIN);
     501  check_generations(ctx, NONE, NONE);
     502  deadlock_obtain_posix(ctx);
     503  release(ctx, MTX_0);
     504  check_generations(ctx, A_1, NONE);
     505  request(ctx, A_1, REQ_MTX_0_RELEASE);
     506  check_generations(ctx, A_1, NONE);
     507  request(ctx, A_1, REQ_MTX_POSIX_RELEASE);
     508  check_generations(ctx, A_1, NONE);
     509#endif
     510}
     511
     512static void tear_down(test_context *ctx)
     513{
     514  rtems_status_code sc;
     515  size_t i;
     516
     517  for (i = 1; i < TASK_COUNT; ++i) {
     518    sc = rtems_task_delete(ctx->tasks[i]);
     519    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     520  }
     521
     522  for (i = 0; i < MTX_COUNT; ++i) {
     523    sc = rtems_semaphore_delete(ctx->mtx[i]);
     524    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
     525  }
     526
     527  mtx_destroy(&ctx->mtx_c11);
     528
     529#ifdef RTEMS_POSIX_API
     530  {
     531    int error;
     532
     533    error = pthread_mutex_destroy(&ctx->mtx_posix);
     534    rtems_test_assert(error == 0);
     535  }
     536#endif
     537}
     538
    242539static void Init(rtems_task_argument arg)
    243540{
     541  test_context *ctx = &test_instance;
     542  rtems_resource_snapshot snapshot;
     543
    244544  TEST_BEGIN();
    245 
    246   test();
    247 
     545  rtems_resource_snapshot_take(&snapshot);
     546
     547  set_up(ctx);
     548  test_inherit(ctx);
     549  test_inherit_fifo_for_equal_priority(ctx);
     550  test_deadlock_two_classic(ctx);
     551  test_deadlock_three_classic(ctx);
     552  test_deadlock_c11_and_classic(ctx);
     553  test_deadlock_classic_and_c11(ctx);
     554  test_deadlock_posix_and_classic(ctx);
     555  test_deadlock_classic_and_posix(ctx);
     556  tear_down(ctx);
     557
     558  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
    248559  TEST_END();
    249560  rtems_test_exit(0);
    250561}
    251562
     563static void fatal_extension(
     564  rtems_fatal_source source,
     565  bool is_internal,
     566  rtems_fatal_code error
     567)
     568{
     569
     570  if (
     571    source == INTERNAL_ERROR_CORE
     572      && !is_internal
     573      && error == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
     574  ) {
     575    test_context *ctx = &test_instance;
     576
     577    longjmp(ctx->deadlock_return_context, 1);
     578  }
     579}
     580
    252581#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
    253582#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
     
    255584#define CONFIGURE_MAXIMUM_TASKS TASK_COUNT
    256585
    257 #define CONFIGURE_MAXIMUM_SEMAPHORES 1
    258 
    259 #define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
     586#define CONFIGURE_MAXIMUM_SEMAPHORES 3
     587
     588#ifdef RTEMS_POSIX_API
     589#define CONFIGURE_MAXIMUM_POSIX_MUTEXES 1
     590#endif
     591
     592#define CONFIGURE_INITIAL_EXTENSIONS \
     593  { .fatal = fatal_extension }, \
     594  RTEMS_TEST_INITIAL_EXTENSION
    260595
    261596#define CONFIGURE_INIT_TASK_PRIORITY 3
  • testsuites/sptests/spsyslock01/init.c

    r1fcac5a rd79df38  
    11/*
    2  * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
     2 * Copyright (c) 2015, 2016 embedded brains GmbH.  All rights reserved.
    33 *
    44 *  embedded brains GmbH
     
    2222#include <errno.h>
    2323#include <limits.h>
    24 #include <pthread.h>
     24#include <setjmp.h>
    2525#include <string.h>
    2626#include <time.h>
     
    3535
    3636#define EVENT_MTX_PRIO_INV RTEMS_EVENT_2
    37 
    38 #define EVENT_MTX_DEADLOCK RTEMS_EVENT_3
    3937
    4038#define EVENT_REC_MTX_ACQUIRE RTEMS_EVENT_4
     
    5755  rtems_id low;
    5856  struct _Mutex_Control mtx;
    59   struct _Mutex_Control deadlock_mtx;
    6057  struct _Mutex_recursive_Control rec_mtx;
    6158  struct _Condition_Control cond;
     
    6663  int generation[2];
    6764  int current_generation[2];
     65  jmp_buf deadlock_return_context;
    6866} test_context;
    6967
     
    297295
    298296  send_event(ctx, idx, EVENT_REC_MTX_RELEASE);
     297}
     298
     299static void test_mtx_deadlock(test_context *ctx)
     300{
     301  struct _Mutex_Control *mtx = &ctx->mtx;
     302
     303  _Mutex_Acquire(mtx);
     304
     305  if (setjmp(ctx->deadlock_return_context) == 0) {
     306    _Mutex_Acquire(mtx);
     307  }
     308
     309  _Mutex_Release(mtx);
    299310}
    300311
     
    494505}
    495506
    496 #ifdef RTEMS_POSIX_API
    497 static void deadlock_cleanup(void *arg)
    498 {
    499   struct _Mutex_Control *deadlock_mtx = arg;
    500 
    501   /*
    502    * The thread terminate procedure will dequeue us from the wait queue.  So,
    503    * one release is sufficient.
    504    */
    505 
    506   _Mutex_Release(deadlock_mtx);
    507   _Mutex_Destroy(deadlock_mtx);
    508 }
    509 #endif
    510 
    511507static void high_task(rtems_task_argument idx)
    512508{
     
    552548      sc = rtems_task_suspend(ctx->mid);
    553549      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    554     }
    555 
    556     if ((events & EVENT_MTX_DEADLOCK) != 0) {
    557       struct _Mutex_Control *deadlock_mtx = &ctx->deadlock_mtx;
    558 
    559 #ifdef RTEMS_POSIX_API
    560       pthread_cleanup_push(deadlock_cleanup, deadlock_mtx);
    561 #endif
    562 
    563       _Mutex_Initialize(deadlock_mtx);
    564       _Mutex_Acquire(deadlock_mtx);
    565       _Mutex_Acquire(deadlock_mtx);
    566 
    567 #ifdef RTEMS_POSIX_API
    568       pthread_cleanup_pop(0);
    569 #endif
    570550    }
    571551
     
    671651  test_mtx_timeout_normal(ctx);
    672652  test_mtx_timeout_recursive(ctx);
     653  test_mtx_deadlock(ctx);
    673654  test_condition(ctx);
    674655  test_condition_timeout(ctx);
     
    678659  test_sched();
    679660
    680   send_event(ctx, 0, EVENT_MTX_DEADLOCK);
    681 
    682661  sc = rtems_task_delete(ctx->mid);
    683662  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    684663
    685 #ifdef RTEMS_POSIX_API
    686664  sc = rtems_task_delete(ctx->high[0]);
    687665  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    688 #endif
    689666
    690667  sc = rtems_task_delete(ctx->high[1]);
     
    708685}
    709686
     687static void fatal_extension(
     688  rtems_fatal_source source,
     689  bool is_internal,
     690  rtems_fatal_code error
     691)
     692{
     693
     694  if (
     695    source == INTERNAL_ERROR_CORE
     696      && !is_internal
     697      && error == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
     698  ) {
     699    test_context *ctx = &test_instance;
     700
     701    longjmp(ctx->deadlock_return_context, 1);
     702  }
     703}
     704
    710705#define CONFIGURE_MICROSECONDS_PER_TICK US_PER_TICK
    711706
     
    715710#define CONFIGURE_MAXIMUM_TASKS 4
    716711
    717 #define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
     712#define CONFIGURE_INITIAL_EXTENSIONS \
     713  { .fatal = fatal_extension }, \
     714  RTEMS_TEST_INITIAL_EXTENSION
    718715
    719716#define CONFIGURE_INIT_TASK_PRIORITY 4
Note: See TracChangeset for help on using the changeset viewer.