source: rtems/cpukit/score/src/mutex.c @ dfcc8bb

Last change: dfcc8bb, checked in by Sebastian Huber <sebastian.huber@…> on 05/03/16 at 05:43:54

score: Adjust thread queue layout

Adjust thread queue layout according to Newlib. This makes it possible
to use the same implementation for <sys/lock.h> and CORE mutexes in the
future.
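In practice this means a Newlib struct _Mutex_Control can be handed directly to the score functions defined below. A minimal usage sketch, assuming the _MUTEX_INITIALIZER macro from Newlib's RTEMS <sys/lock.h> (the counter is illustration only):

  #include <sys/lock.h>

  /* _MUTEX_INITIALIZER is assumed from Newlib's RTEMS <sys/lock.h> */
  static struct _Mutex_Control lock = _MUTEX_INITIALIZER;
  static int counter;

  void increment( void )
  {
    _Mutex_Acquire( &lock );  /* blocks until the mutex is free */
    ++counter;
    _Mutex_Release( &lock );  /* hands the mutex to the first waiter, if any */
  }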

/*
 * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#if HAVE_STRUCT__THREAD_QUEUE_QUEUE

#include <sys/lock.h>
#include <errno.h>

#include <rtems/score/assert.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/todimpl.h>

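/*
 * All mutexes in this file use the priority thread queue discipline, so
 * waiting threads are dequeued in priority order.
 */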
#define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority

typedef struct {
  Thread_queue_Syslock_queue Queue;
} Mutex_Control;

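/*
 * The static assertions below guarantee that Mutex_Control and the Newlib
 * <sys/lock.h> struct _Mutex_Control have an identical memory layout, so a
 * pointer to one may be safely cast to a pointer to the other.
 */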
RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, Queue )
    == offsetof( struct _Mutex_Control, _Queue ),
  MUTEX_CONTROL_QUEUE
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
  MUTEX_CONTROL_SIZE
);

typedef struct {
  Mutex_Control Mutex;
  unsigned int nest_level;
} Mutex_recursive_Control;

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, Mutex )
    == offsetof( struct _Mutex_recursive_Control, _Mutex ),
  MUTEX_RECURSIVE_CONTROL_MUTEX
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, nest_level )
    == offsetof( struct _Mutex_recursive_Control, _nest_level ),
  MUTEX_RECURSIVE_CONTROL_NEST_LEVEL
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_recursive_Control )
    == sizeof( struct _Mutex_recursive_Control ),
  MUTEX_RECURSIVE_CONTROL_SIZE
);

static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
{
  return (Mutex_Control *) _mutex;
}

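/*
 * Disable interrupts and acquire the thread queue lock.  Returns the
 * executing thread so that callers do not have to look it up again.
 */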
static Thread_Control *_Mutex_Queue_acquire(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &mutex->Queue.Queue,
    &executing->Potpourri_stats,
    lock_context
  );

  return executing;
}

static void _Mutex_Queue_release(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Queue_release( &mutex->Queue.Queue, lock_context );
}

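/*
 * The mutex is owned by another thread: let the owner inherit the priority
 * of the executing thread, then block on the thread queue.  A timeout of
 * zero means wait forever.
 */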
static void _Mutex_Acquire_slow(
  Mutex_Control     *mutex,
  Thread_Control    *owner,
  Thread_Control    *executing,
  Watchdog_Interval  timeout,
  ISR_lock_Context  *lock_context
)
{
  _Thread_Inherit_priority( owner, executing );
  _Thread_queue_Enqueue_critical(
    &mutex->Queue.Queue,
    MUTEX_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_MUTEX,
    timeout,
    lock_context
  );
}

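/*
 * Hand the mutex directly to the first waiter, extract that thread from the
 * thread queue, and unblock it.  Afterwards, restore the real priority of
 * the executing thread unless it must keep its elevated priority.
 */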
static void _Mutex_Release_slow(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  Thread_queue_Heads   *heads,
  bool                  keep_priority,
  Thread_queue_Context *queue_context
)
{
  if ( heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control                *first;
    bool                           unblock;

    operations = MUTEX_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    mutex->Queue.Queue.owner = first;
    ++first->resource_count;
    unblock = _Thread_queue_Extract_locked(
      &mutex->Queue.Queue,
      operations,
      first,
      queue_context
    );
    _Thread_queue_Boost_priority( &mutex->Queue.Queue, first );
    _Thread_queue_Unblock_critical(
      unblock,
      &mutex->Queue.Queue,
      first,
      &queue_context->Lock_context
    );
  } else {
    _Mutex_Queue_release( mutex, &queue_context->Lock_context );
  }

  if ( !keep_priority ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable();
    _Thread_Restore_priority( executing );
    _Thread_Dispatch_enable( cpu_self );
  }
}

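/*
 * Give up ownership.  On the fast path nobody waits on the mutex and the
 * executing thread keeps its current priority, so only the queue lock has
 * to be released.
 */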
static void _Mutex_Release_critical(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Heads *heads;
  bool                keep_priority;

  mutex->Queue.Queue.owner = NULL;

  --executing->resource_count;

  /*
   * Ensure that the owner resource count is visible to all other
   * processors and that we read the latest priority restore
   * hint.
   */
  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

  heads = mutex->Queue.Queue.heads;
  keep_priority = _Thread_Owns_resources( executing )
    || !executing->priority_restore_hint;

  if ( __predict_true( heads == NULL && keep_priority ) ) {
    _Mutex_Queue_release( mutex, &queue_context->Lock_context );
  } else {
    _Mutex_Release_slow(
      mutex,
      executing,
      heads,
      keep_priority,
      queue_context
    );
  }
}

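/*
 * Acquire the mutex and block without a timeout if necessary.  The fast
 * path takes ownership of a free mutex while the thread queue lock is held
 * and returns immediately.
 */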
void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &lock_context );
  } else {
    _Mutex_Acquire_slow( mutex, owner, executing, 0, &lock_context );
  }
}

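/*
 * Like _Mutex_Acquire(), but give up once the absolute timeout abstime has
 * been reached.  Returns zero on success or a POSIX error number (EINVAL
 * for an invalid abstime, ETIMEDOUT if the mutex was not obtained in time).
 */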
int _Mutex_Acquire_timed(
  struct _Mutex_Control *_mutex,
  const struct timespec *abstime
)
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );

    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
  }
}

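/*
 * Take the mutex if it is currently free, otherwise return EBUSY without
 * blocking.
 */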
int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;
  int               eno;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Queue.Queue.owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( mutex, &lock_context );

  return eno;
}

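/*
 * Release the mutex.  The caller must be the owner; this is only checked
 * via _Assert() in debug configurations.
 */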
void _Mutex_Release( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  Thread_Control       *executing;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context, NULL );
  executing = _Mutex_Queue_acquire( mutex, &queue_context.Lock_context );

  _Assert( mutex->Queue.Queue.owner == executing );

  _Mutex_Release_critical( mutex, executing, &queue_context );
}

static Mutex_recursive_Control *_Mutex_recursive_Get(
  struct _Mutex_recursive_Control *_mutex
)
{
  return (Mutex_recursive_Control *) _mutex;
}

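/*
 * The recursive variants add one case to each operation: if the executing
 * thread is already the owner, the acquisition simply bumps the nest level.
 * A nest level of zero means the mutex is owned exactly once.
 */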
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else {
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &lock_context );
  }
}

int _Mutex_recursive_Acquire_timed(
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    _Mutex_Acquire_slow(
      &mutex->Mutex,
      owner,
      executing,
      ticks,
      &lock_context
    );

    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
  }
}

int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;
  int                      eno;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.Queue.Queue.owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( &mutex->Mutex, &lock_context );

  return eno;
}

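/*
 * Release one level of recursive ownership.  Only the outermost release
 * (nest level zero) actually gives up the mutex; inner releases just
 * decrement the nest level.
 */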
void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  Thread_Control          *executing;
  unsigned int             nest_level;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context, NULL );
  executing = _Mutex_Queue_acquire(
    &mutex->Mutex,
    &queue_context.Lock_context
  );

  _Assert( mutex->Mutex.Queue.Queue.owner == executing );

  nest_level = mutex->nest_level;

  if ( __predict_true( nest_level == 0 ) ) {
    _Mutex_Release_critical( &mutex->Mutex, executing, &queue_context );
  } else {
    mutex->nest_level = nest_level - 1;

    _Mutex_Queue_release( &mutex->Mutex, &queue_context.Lock_context );
  }
}

#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */