source: rtems/cpukit/score/src/mutex.c @ dce48791

5
Last change on this file since dce48791 was dce48791, checked in by Sebastian Huber <sebastian.huber@…>, on 05/23/16 at 11:37:59

score: Add Status_Control for all APIs

Unify the status codes of the Classic and POSIX API to use the new enum
Status_Control. This eliminates the Thread_Control::Wait::timeout_code
field and the timeout parameter of _Thread_queue_Enqueue_critical() and
_MPCI_Send_request_packet(). It gets rid of the status code translation
tables and instead uses simple bit operations to get the status for a
particular API. This enables translation of status code constants at
compile time. Add _Thread_Wait_get_status() to avoid direct access of
thread internal data structures.

  • Property mode set to 100644
File size: 10.8 KB
RevLine 
[214d8ed]1/*
2 * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#if HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#if HAVE_STRUCT__THREAD_QUEUE_QUEUE
20
21#include <sys/lock.h>
22#include <errno.h>
23
24#include <rtems/score/assert.h>
25#include <rtems/score/threadimpl.h>
26#include <rtems/score/threadqimpl.h>
27#include <rtems/score/todimpl.h>
28
/* All mutexes of this API use priority-ordered thread queues. */
#define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority

/*
 * Score-internal view of the self-contained <sys/lock.h> mutex.  It must be
 * layout-compatible with struct _Mutex_Control so that the opaque user-visible
 * object can simply be cast (see _Mutex_Get()); the static asserts below
 * enforce this byte-for-byte correspondence.
 */
typedef struct {
  Thread_queue_Syslock_queue Queue;
  Thread_Control *owner;    /* Owning thread, or NULL if the mutex is free */
} Mutex_Control;

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, Queue )
    == offsetof( struct _Mutex_Control, _Queue ),
  MUTEX_CONTROL_QUEUE
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, owner )
    == offsetof( struct _Mutex_Control, _owner ),
  MUTEX_CONTROL_OWNER
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
  MUTEX_CONTROL_SIZE
);
52
/*
 * Score-internal view of the recursive <sys/lock.h> mutex: a plain mutex plus
 * a nesting counter.  Must be layout-compatible with
 * struct _Mutex_recursive_Control (enforced by the static asserts below).
 */
typedef struct {
  Mutex_Control Mutex;
  /* Number of extra acquisitions by the owner; 0 means one acquisition. */
  unsigned int nest_level;
} Mutex_recursive_Control;

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, Mutex )
    == offsetof( struct _Mutex_recursive_Control, _Mutex ),
  MUTEX_RECURSIVE_CONTROL_MUTEX
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, nest_level )
    == offsetof( struct _Mutex_recursive_Control, _nest_level ),
  MUTEX_RECURSIVE_CONTROL_NEST_LEVEL
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_recursive_Control )
    == sizeof( struct _Mutex_recursive_Control ),
  MUTEX_RECURSIVE_CONTROL_SIZE
);
75
76static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
77{
78  return (Mutex_Control *) _mutex;
79}
80
/*
 * Disable interrupts and acquire the mutex thread queue lock.  Returns the
 * executing thread, which is read only after interrupts are disabled.  The
 * caller must eventually release via _Mutex_Queue_release() (or hand the
 * lock context on to the enqueue/release slow paths).
 */
static Thread_Control *_Mutex_Queue_acquire(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  /* Safe to sample only with interrupts disabled on this processor */
  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &mutex->Queue.Queue,
    &executing->Potpourri_stats,
    lock_context
  );

  return executing;
}
98
/*
 * Release the mutex thread queue lock and restore the interrupt state saved
 * in the lock context by _Mutex_Queue_acquire().
 */
static void _Mutex_Queue_release(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Queue_release( &mutex->Queue.Queue, lock_context );
}
106
/*
 * Block the executing thread on a mutex that is owned by another thread.
 * Must be called with the queue lock held (lock context from
 * _Mutex_Queue_acquire()); the enqueue operation gives the lock up.
 * A timeout of 0 appears to mean "wait forever" -- the untimed acquire
 * paths pass 0 (NOTE(review): confirm against _Thread_queue_Enqueue_critical).
 */
static void _Mutex_Acquire_slow(
  Mutex_Control     *mutex,
  Thread_Control    *owner,
  Thread_Control    *executing,
  Watchdog_Interval  timeout,
  ISR_lock_Context  *lock_context
)
{
  /*
   * Priority inheritance: raise the current owner before we block so a
   * lower-priority owner cannot delay us indefinitely.
   */
  _Thread_Inherit_priority( owner, executing );
  _Thread_queue_Enqueue_critical(
    &mutex->Queue.Queue,
    MUTEX_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_MUTEX,
    timeout,
    lock_context
  );
}
125
/*
 * Release path taken when there are waiting threads and/or the executing
 * thread must have its priority restored.  Called with the queue lock held;
 * the lock is released on all paths.  If waiters exist, ownership is handed
 * directly to the first thread in queue order (no ownership gap).
 */
static void _Mutex_Release_slow(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  Thread_queue_Heads   *heads,
  bool                  keep_priority,
  Thread_queue_Context *queue_context
)
{
  if (heads != NULL) {
    const Thread_queue_Operations *operations;
    Thread_Control                *first;
    bool                           unblock;

    operations = MUTEX_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    /* Hand the mutex over while still holding the queue lock */
    mutex->owner = first;
    ++first->resource_count;
    unblock = _Thread_queue_Extract_locked(
      &mutex->Queue.Queue,
      operations,
      first,
      queue_context
    );
    /* Remaining waiters may still require priority inheritance for the
     * new owner. */
    _Thread_queue_Boost_priority( &mutex->Queue.Queue, first );
    _Thread_queue_Unblock_critical(
      unblock,
      &mutex->Queue.Queue,
      first,
      &queue_context->Lock_context
    );
  } else {
    _Mutex_Queue_release( mutex, &queue_context->Lock_context );
  }

  if ( !keep_priority ) {
    Per_CPU_Control *cpu_self;

    /* Drop any inherited priority now that we own no contested resource */
    cpu_self = _Thread_Dispatch_disable();
    _Thread_Restore_priority( executing );
    _Thread_Dispatch_enable( cpu_self );
  }
}
169
/*
 * Core of the release operation.  Called with the queue lock held (lock
 * context inside queue_context); the lock is released on all paths.  The
 * fast path (no waiters, no priority restore needed) avoids the slow path
 * entirely.
 */
static void _Mutex_Release_critical(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Heads *heads;
  bool keep_priority;

  mutex->owner = NULL;

  --executing->resource_count;

  /*
   * Ensure that the owner resource count is visible to all other
   * processors and that we read the latest priority restore
   * hint.
   */
  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

  heads = mutex->Queue.Queue.heads;
  /* Priority must be restored only once the last resource is released and
   * a restore was hinted by the scheduler. */
  keep_priority = _Thread_Owns_resources( executing )
    || !executing->priority_restore_hint;

  if ( __predict_true( heads == NULL && keep_priority ) ) {
    _Mutex_Queue_release( mutex, &queue_context->Lock_context );
  } else {
    _Mutex_Release_slow(
      mutex,
      executing,
      heads,
      keep_priority,
      queue_context
    );
  }
}
206
/*
 * Acquire the mutex, blocking without timeout if it is owned by another
 * thread.  Fast path: take ownership directly under the queue lock.
 * Not recursive -- a second acquire by the owner would block
 * (timeout 0 = wait forever in _Mutex_Acquire_slow()).
 */
void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &lock_context );
  } else {
    _Mutex_Acquire_slow( mutex, owner, executing, 0, &lock_context );
  }
}
227
/*
 * Acquire the mutex with an absolute timeout.
 *
 * Returns 0 on success, EINVAL for an invalid abstime, ETIMEDOUT when the
 * deadline is in the past/now or expires while blocked, or another POSIX
 * error code derived from the wait status.
 */
int _Mutex_Acquire_timed(
  struct _Mutex_Control *_mutex,
  const struct timespec *abstime
)
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    /* Uncontested: take ownership without converting the timeout */
    mutex->owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    /* Validate and convert the absolute deadline only on the slow path */
    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );

    /* Map the score wait status (success/timeout/...) to a POSIX errno */
    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
  }
}
269
270int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
271{
272  Mutex_Control    *mutex;
273  ISR_lock_Context  lock_context;
274  Thread_Control   *executing;
275  Thread_Control   *owner;
[44f3ea9]276  int               eno;
[214d8ed]277
278  mutex = _Mutex_Get( _mutex );
279  executing = _Mutex_Queue_acquire( mutex, &lock_context );
280
281  owner = mutex->owner;
282
283  if ( __predict_true( owner == NULL ) ) {
284    mutex->owner = executing;
285    ++executing->resource_count;
[44f3ea9]286    eno = 0;
[214d8ed]287  } else {
[44f3ea9]288    eno = EBUSY;
[214d8ed]289  }
290
291  _Mutex_Queue_release( mutex, &lock_context );
292
[44f3ea9]293  return eno;
[214d8ed]294}
295
/*
 * Release the mutex.  The executing thread must be the owner (checked only
 * via _Assert in debug builds).  May unblock a waiting thread and restore
 * the executing thread's priority.
 */
void _Mutex_Release( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  Thread_Control       *executing;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context, NULL );
  executing = _Mutex_Queue_acquire( mutex, &queue_context.Lock_context );

  _Assert( mutex->owner == executing );

  _Mutex_Release_critical( mutex, executing, &queue_context );
}
310
311static Mutex_recursive_Control *_Mutex_recursive_Get(
312  struct _Mutex_recursive_Control *_mutex
313)
314{
315  return (Mutex_recursive_Control *) _mutex;
316}
317
/*
 * Acquire the recursive mutex, blocking without timeout if it is owned by
 * another thread.  If the executing thread already owns it, only the nest
 * level is incremented.
 */
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else if ( owner == executing ) {
    /* Recursive acquisition by the owner: just count it */
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else {
    /* Timeout 0: wait forever */
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &lock_context );
  }
}
341
/*
 * Acquire the recursive mutex with an absolute timeout.
 *
 * Returns 0 on success (including recursive acquisition by the owner),
 * EINVAL for an invalid abstime, ETIMEDOUT when the deadline is in the
 * past/now or expires while blocked, or another POSIX error code derived
 * from the wait status.
 */
int _Mutex_recursive_Acquire_timed(
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else if ( owner == executing ) {
    /* Recursive acquisition never times out */
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    /* Validate and convert the absolute deadline only on the slow path */
    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    _Mutex_Acquire_slow(
      &mutex->Mutex,
      owner,
      executing,
      ticks,
      &lock_context
    );

    /* Map the score wait status (success/timeout/...) to a POSIX errno */
    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
  }
}
394
395int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
396{
397  Mutex_recursive_Control *mutex;
398  ISR_lock_Context         lock_context;
399  Thread_Control          *executing;
400  Thread_Control          *owner;
[44f3ea9]401  int                      eno;
[214d8ed]402
403  mutex = _Mutex_recursive_Get( _mutex );
404  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
405
406  owner = mutex->Mutex.owner;
407
408  if ( __predict_true( owner == NULL ) ) {
409    mutex->Mutex.owner = executing;
410    ++executing->resource_count;
[44f3ea9]411    eno = 0;
[214d8ed]412  } else if ( owner == executing ) {
413    ++mutex->nest_level;
[44f3ea9]414    eno = 0;
[214d8ed]415  } else {
[44f3ea9]416    eno = EBUSY;
[214d8ed]417  }
418
419  _Mutex_Queue_release( &mutex->Mutex, &lock_context );
420
[44f3ea9]421  return eno;
[214d8ed]422}
423
/*
 * Release the recursive mutex.  The executing thread must be the owner
 * (checked only via _Assert in debug builds).  If the nest level is
 * non-zero, only the counter is decremented; the final release goes
 * through _Mutex_Release_critical().
 */
void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  Thread_Control          *executing;
  unsigned int             nest_level;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context, NULL );
  executing = _Mutex_Queue_acquire(
    &mutex->Mutex,
    &queue_context.Lock_context
  );

  _Assert( mutex->Mutex.owner == executing );

  nest_level = mutex->nest_level;

  if ( __predict_true( nest_level == 0 ) ) {
    /* Outermost release: actually give the mutex up */
    _Mutex_Release_critical( &mutex->Mutex, executing, &queue_context );
  } else {
    mutex->nest_level = nest_level - 1;

    _Mutex_Queue_release( &mutex->Mutex, &queue_context.Lock_context );
  }
}
450
451#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */
Note: See TracBrowser for help on using the repository browser.