source: rtems/cpukit/score/src/mutex.c @ 44f3ea9

Last change on this file was 44f3ea9, checked in by Sebastian Huber <sebastian.huber@…> on Aug 31, 2015 at 11:37:52 AM

score: Fix return status of mutex try acquire

This fixes a copy and paste error (from libbsd).

/*
 * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#if HAVE_STRUCT__THREAD_QUEUE_QUEUE

#include <sys/lock.h>
#include <errno.h>

#include <rtems/score/assert.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/todimpl.h>

#define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority

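/*
 * The structures defined below must have the same layout as the
 * corresponding structures of Newlib <sys/lock.h>.  The static assertions
 * verify this.
 */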
typedef struct {
  Thread_queue_Syslock_queue Queue;
  Thread_Control *owner;
} Mutex_Control;

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, Queue )
    == offsetof( struct _Mutex_Control, _Queue ),
  MUTEX_CONTROL_QUEUE
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, owner )
    == offsetof( struct _Mutex_Control, _owner ),
  MUTEX_CONTROL_OWNER
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
  MUTEX_CONTROL_SIZE
);

typedef struct {
  Mutex_Control Mutex;
  unsigned int nest_level;
} Mutex_recursive_Control;

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, Mutex )
    == offsetof( struct _Mutex_recursive_Control, _Mutex ),
  MUTEX_RECURSIVE_CONTROL_MUTEX
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, nest_level )
    == offsetof( struct _Mutex_recursive_Control, _nest_level ),
  MUTEX_RECURSIVE_CONTROL_NEST_LEVEL
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_recursive_Control )
    == sizeof( struct _Mutex_recursive_Control ),
  MUTEX_RECURSIVE_CONTROL_SIZE
);

static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
{
  return (Mutex_Control *) _mutex;
}

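/*
 * Disables interrupts and acquires the thread queue lock of the mutex.
 * Returns the executing thread.
 */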
static Thread_Control *_Mutex_Queue_acquire(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &mutex->Queue.Queue,
    &executing->Potpourri_stats,
    lock_context
  );

  return executing;
}

static void _Mutex_Queue_release(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Queue_release( &mutex->Queue.Queue, lock_context );
}

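/*
 * Contested acquire: boost the owner via priority inheritance, then block
 * the executing thread on the thread queue with an optional timeout in
 * ticks (zero means wait forever).
 */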
static void _Mutex_Acquire_slow(
  Mutex_Control     *mutex,
  Thread_Control    *owner,
  Thread_Control    *executing,
  Watchdog_Interval  timeout,
  ISR_lock_Context  *lock_context
)
{
  /* Priority inheritance */
  _Thread_Raise_priority( owner, executing->current_priority );

  _Thread_queue_Enqueue_critical(
    &mutex->Queue.Queue,
    MUTEX_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_MUTEX,
    timeout,
    ETIMEDOUT,
    lock_context
  );
}

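/*
 * Contested release: if threads are waiting, transfer mutex ownership to
 * the first waiter and extract it from the thread queue; otherwise just
 * release the queue lock.  Restore the real priority of the executing
 * thread if its inherited priority is no longer needed.
 */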
static void _Mutex_Release_slow(
  Mutex_Control      *mutex,
  Thread_Control     *executing,
  Thread_queue_Heads *heads,
  bool                keep_priority,
  ISR_lock_Context   *lock_context
)
{
  if ( heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control *first;

    operations = MUTEX_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    mutex->owner = first;
    _Thread_queue_Extract_critical(
      &mutex->Queue.Queue,
      operations,
      first,
      lock_context
    );
  } else {
    _Mutex_Queue_release( mutex, lock_context );
  }

  if ( !keep_priority ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable();
    _Thread_Restore_priority( executing );
    _Thread_Dispatch_enable( cpu_self );
  }
}

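/*
 * Clears the mutex owner and takes the fast path if nobody waits and the
 * executing thread needs no priority restoration, otherwise falls back to
 * _Mutex_Release_slow().
 */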
static void _Mutex_Release_critical(
  Mutex_Control    *mutex,
  Thread_Control   *executing,
  ISR_lock_Context *lock_context
)
{
  Thread_queue_Heads *heads;
  bool keep_priority;

  mutex->owner = NULL;

  --executing->resource_count;

  /*
   * Ensure that the owner resource count is visible to all other
   * processors and that we read the latest priority restore
   * hint.
   */
  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

  heads = mutex->Queue.Queue.heads;
  keep_priority = _Thread_Owns_resources( executing )
    || !executing->priority_restore_hint;

  if ( __predict_true( heads == NULL && keep_priority ) ) {
    _Mutex_Queue_release( mutex, lock_context );
  } else {
    _Mutex_Release_slow(
      mutex,
      executing,
      heads,
      keep_priority,
      lock_context
    );
  }
}

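/*
 * Acquires the mutex and waits forever if necessary: take ownership
 * directly if the mutex is unowned, otherwise block via
 * _Mutex_Acquire_slow().
 */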
void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;
  ++executing->resource_count;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    _Mutex_Queue_release( mutex, &lock_context );
  } else {
    _Mutex_Acquire_slow( mutex, owner, executing, 0, &lock_context );
  }
}

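/*
 * Like _Mutex_Acquire(), but waits at most until the absolute timeout
 * expires.  Returns 0 on success, EINVAL for an invalid timeout, and
 * ETIMEDOUT if the timeout is in the past, is now, or expires while
 * waiting.
 */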
int _Mutex_Acquire_timed(
  struct _Mutex_Control *_mutex,
  const struct timespec *abstime
)
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;
  ++executing->resource_count;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    _Mutex_Queue_release( mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    executing->Wait.return_code = 0;
    _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );

    return (int) executing->Wait.return_code;
  }
}

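/*
 * Tries to acquire the mutex without blocking.  Returns 0 on success and
 * EBUSY if the mutex is already owned (the return status fixed by this
 * commit).
 */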
int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;
  int               eno;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( mutex, &lock_context );

  return eno;
}

void _Mutex_Release( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  _Assert( mutex->owner == executing );

  _Mutex_Release_critical( mutex, executing, &lock_context );
}

static Mutex_recursive_Control *_Mutex_recursive_Get(
  struct _Mutex_recursive_Control *_mutex
)
{
  return (Mutex_recursive_Control *) _mutex;
}

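/*
 * Recursive variant of _Mutex_Acquire(): nested acquisitions by the owner
 * only increment the nest level.
 */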
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else {
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &lock_context );
  }
}

int _Mutex_recursive_Acquire_timed(
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    executing->Wait.return_code = 0;
    _Mutex_Acquire_slow(
      &mutex->Mutex,
      owner,
      executing,
      ticks,
      &lock_context
    );

    return (int) executing->Wait.return_code;
  }
}

int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;
  int                      eno;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( &mutex->Mutex, &lock_context );

  return eno;
}

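/*
 * Recursive variant of _Mutex_Release(): the mutex is actually released
 * only by the outermost release, i.e. once the nest level is zero.
 */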
void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  unsigned int             nest_level;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  _Assert( mutex->Mutex.owner == executing );

  nest_level = mutex->nest_level;

  if ( __predict_true( nest_level == 0 ) ) {
    _Mutex_Release_critical( &mutex->Mutex, executing, &lock_context );
  } else {
    mutex->nest_level = nest_level - 1;

    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  }
}

#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */