source: rtems/cpukit/score/src/mutex.c @ e41308ea

5
Last change on this file since e41308ea was e41308ea, checked in by Sebastian Huber <sebastian.huber@…>, on 08/22/16 at 08:58:34

score: Introduce Thread_queue_Lock_context

Introduce Thread_queue_Lock_context to contain the context necessary for
thread queue lock and thread wait lock acquire/release operations to
reduce the Thread_Control size.

  • Property mode set to 100644
File size: 10.6 KB
Line 
1/*
2 * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#if HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#if HAVE_STRUCT__THREAD_QUEUE_QUEUE
20
21#include <sys/lock.h>
22#include <errno.h>
23
24#include <rtems/score/assert.h>
25#include <rtems/score/threadimpl.h>
26#include <rtems/score/threadqimpl.h>
27#include <rtems/score/todimpl.h>
28
29#define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority_inherit
30
/*
 * Internal representation of the self-contained mutex defined by Newlib
 * <sys/lock.h>.  It is based on a thread queue using the priority
 * inheritance operations (see MUTEX_TQ_OPERATIONS).
 */
typedef struct {
  Thread_queue_Syslock_queue Queue;
} Mutex_Control;

/*
 * The following static assertions ensure that Mutex_Control and the opaque
 * struct _Mutex_Control from <sys/lock.h> have the same size and layout, so
 * that the pointer cast in _Mutex_Get() is safe.
 */
RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, Queue )
    == offsetof( struct _Mutex_Control, _Queue ),
  MUTEX_CONTROL_QUEUE
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
  MUTEX_CONTROL_SIZE
);
45
/*
 * Internal representation of the self-contained recursive mutex defined by
 * Newlib <sys/lock.h>.  A nest level of zero means the mutex is held
 * exactly once; each recursive acquire by the owner increments it.
 */
typedef struct {
  Mutex_Control Mutex;
  unsigned int nest_level;
} Mutex_recursive_Control;

/*
 * The following static assertions ensure that Mutex_recursive_Control and
 * the opaque struct _Mutex_recursive_Control from <sys/lock.h> have the
 * same size and layout, so that the pointer cast in _Mutex_recursive_Get()
 * is safe.
 */
RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, Mutex )
    == offsetof( struct _Mutex_recursive_Control, _Mutex ),
  MUTEX_RECURSIVE_CONTROL_MUTEX
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, nest_level )
    == offsetof( struct _Mutex_recursive_Control, _nest_level ),
  MUTEX_RECURSIVE_CONTROL_NEST_LEVEL
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_recursive_Control )
    == sizeof( struct _Mutex_recursive_Control ),
  MUTEX_RECURSIVE_CONTROL_SIZE
);
68
69static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
70{
71  return (Mutex_Control *) _mutex;
72}
73
/*
 * Disables interrupts and acquires the thread queue lock of the mutex.
 *
 * The executing thread is sampled after interrupts are disabled and is
 * returned so that callers do not have to read _Thread_Executing again.
 * The saved interrupt state lives in the queue context and is restored by
 * _Mutex_Queue_release().
 */
static Thread_Control *_Mutex_Queue_acquire(
  Mutex_Control        *mutex,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &mutex->Queue.Queue,
    &executing->Potpourri_stats,
    &queue_context->Lock_context.Lock_context
  );

  return executing;
}
91
/*
 * Releases the thread queue lock of the mutex and restores the interrupt
 * state saved by _Mutex_Queue_acquire().
 */
static void _Mutex_Queue_release(
  Mutex_Control        *mutex,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Queue_release(
    &mutex->Queue.Queue,
    &queue_context->Lock_context.Lock_context
  );
}
102
/*
 * Slow path of a mutex acquire: enqueues the executing thread on the mutex
 * thread queue and blocks it.  Must be called with the thread queue lock
 * acquired; the enqueue operation is responsible for releasing it.
 *
 * A detected deadlock results in a fatal error (see
 * _Thread_queue_Deadlock_fatal).
 *
 * NOTE(review): the owner parameter is currently unused; all callers pass
 * it, presumably for interface symmetry — confirm before removing.
 */
static void _Mutex_Acquire_slow(
  Mutex_Control        *mutex,
  Thread_Control       *owner,
  Thread_Control       *executing,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Context_set_expected_level( queue_context, 1 );
  _Thread_queue_Context_set_deadlock_callout(
    queue_context,
    _Thread_queue_Deadlock_fatal
  );
  _Thread_queue_Enqueue_critical(
    &mutex->Queue.Queue,
    MUTEX_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_MUTEX,
    queue_context
  );
}
123
/*
 * Gives up ownership of the mutex.  Must be called with the thread queue
 * lock acquired by the executing thread; the lock is released on all paths.
 *
 * Clears the owner and drops the resource count, then either takes the
 * fast path (no waiting threads and no priority restoration needed) or
 * surrenders the mutex via the thread queue operations.
 */
static void _Mutex_Release_critical(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Heads *heads;
  bool keep_priority;

  mutex->Queue.Queue.owner = NULL;

  --executing->resource_count;

  /*
   * Ensure that the owner resource count is visible to all other
   * processors and that we read the latest priority restore
   * hint.
   */
  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

  heads = mutex->Queue.Queue.heads;
  keep_priority = _Thread_Owns_resources( executing )
    || !executing->priority_restore_hint;

  if ( __predict_true( heads == NULL && keep_priority ) ) {
    /* Fast path: nobody waits and the executing thread keeps its priority. */
    _Mutex_Queue_release( mutex, queue_context );
  } else {
    /*
     * Hand the mutex over to a waiting thread and/or restore the priority
     * of the executing thread.
     */
    _Thread_queue_Surrender(
      &mutex->Queue.Queue,
      MUTEX_TQ_OPERATIONS,
      heads,
      executing,
      keep_priority,
      queue_context
    );
  }
}
161
/*
 * Acquires the mutex, blocking without timeout if it is owned by another
 * thread.  The check-and-claim of the owner field happens under the thread
 * queue lock, so it is atomic with respect to all other mutex operations.
 */
void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  Thread_Control       *executing;
  Thread_Control       *owner;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( mutex, &queue_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    /* Uncontested fast path: claim ownership and drop the lock. */
    mutex->Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &queue_context );
  } else {
    /* Contested: block forever; the enqueue releases the lock. */
    _Thread_queue_Context_set_no_timeout( &queue_context );
    _Mutex_Acquire_slow( mutex, owner, executing, &queue_context );
  }
}
184
/*
 * Acquires the mutex with an absolute CLOCK_REALTIME timeout.
 *
 * Returns 0 on success, EINVAL for an invalid abstime, ETIMEDOUT if the
 * timeout is already in the past (or now), otherwise the POSIX error
 * corresponding to the thread wait status after blocking.
 */
int _Mutex_Acquire_timed(
  struct _Mutex_Control *_mutex,
  const struct timespec *abstime
)
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  Thread_Control       *executing;
  Thread_Control       *owner;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( mutex, &queue_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    /* Uncontested fast path: claim ownership and drop the lock. */
    mutex->Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &queue_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    /* Convert the absolute timeout to clock ticks while holding the lock. */
    switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( mutex, &queue_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( mutex, &queue_context );
        return ETIMEDOUT;
      default:
        break;
    }

    _Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
    _Mutex_Acquire_slow( mutex, owner, executing, &queue_context );

    /* Map the thread wait status (success or timeout) to a POSIX errno. */
    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
  }
}
228
229int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
230{
231  Mutex_Control        *mutex;
232  Thread_queue_Context  queue_context;
233  Thread_Control       *executing;
234  Thread_Control       *owner;
235  int                   eno;
236
237  mutex = _Mutex_Get( _mutex );
238  _Thread_queue_Context_initialize( &queue_context );
239  executing = _Mutex_Queue_acquire( mutex, &queue_context );
240
241  owner = mutex->Queue.Queue.owner;
242
243  if ( __predict_true( owner == NULL ) ) {
244    mutex->Queue.Queue.owner = executing;
245    ++executing->resource_count;
246    eno = 0;
247  } else {
248    eno = EBUSY;
249  }
250
251  _Mutex_Queue_release( mutex, &queue_context );
252
253  return eno;
254}
255
/*
 * Releases the mutex.  The executing thread must be the owner; this is
 * checked only by an assertion in debug configurations.
 */
void _Mutex_Release( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  Thread_Control       *executing;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( mutex, &queue_context );

  _Assert( mutex->Queue.Queue.owner == executing );

  /* Releases the thread queue lock on all paths. */
  _Mutex_Release_critical( mutex, executing, &queue_context );
}
270
271static Mutex_recursive_Control *_Mutex_recursive_Get(
272  struct _Mutex_recursive_Control *_mutex
273)
274{
275  return (Mutex_recursive_Control *) _mutex;
276}
277
/*
 * Acquires the recursive mutex, blocking without timeout if it is owned by
 * another thread.  If the executing thread already owns the mutex, only
 * the nest level is incremented.
 */
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    /* Uncontested fast path: claim ownership and drop the lock. */
    mutex->Mutex.Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &queue_context );
  } else if ( owner == executing ) {
    /* Recursive acquire by the owner: just bump the nest level. */
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &queue_context );
  } else {
    /* Contested: block forever; the enqueue releases the lock. */
    _Thread_queue_Context_set_no_timeout( &queue_context );
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, &queue_context );
  }
}
303
/*
 * Acquires the recursive mutex with an absolute CLOCK_REALTIME timeout.
 *
 * Returns 0 on success (including a recursive acquire by the owner),
 * EINVAL for an invalid abstime, ETIMEDOUT if the timeout is already in
 * the past (or now), otherwise the POSIX error corresponding to the thread
 * wait status after blocking.
 */
int _Mutex_recursive_Acquire_timed(
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    /* Uncontested fast path: claim ownership and drop the lock. */
    mutex->Mutex.Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &queue_context );

    return 0;
  } else if ( owner == executing ) {
    /* Recursive acquire by the owner: just bump the nest level. */
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &queue_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    /* Convert the absolute timeout to clock ticks while holding the lock. */
    switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( &mutex->Mutex, &queue_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( &mutex->Mutex, &queue_context );
        return ETIMEDOUT;
      default:
        break;
    }

    _Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, &queue_context );

    /* Map the thread wait status (success or timeout) to a POSIX errno. */
    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
  }
}
352
353int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
354{
355  Mutex_recursive_Control *mutex;
356  Thread_queue_Context     queue_context;
357  Thread_Control          *executing;
358  Thread_Control          *owner;
359  int                      eno;
360
361  mutex = _Mutex_recursive_Get( _mutex );
362  _Thread_queue_Context_initialize( &queue_context );
363  executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
364
365  owner = mutex->Mutex.Queue.Queue.owner;
366
367  if ( __predict_true( owner == NULL ) ) {
368    mutex->Mutex.Queue.Queue.owner = executing;
369    ++executing->resource_count;
370    eno = 0;
371  } else if ( owner == executing ) {
372    ++mutex->nest_level;
373    eno = 0;
374  } else {
375    eno = EBUSY;
376  }
377
378  _Mutex_Queue_release( &mutex->Mutex, &queue_context );
379
380  return eno;
381}
382
/*
 * Releases the recursive mutex.  The executing thread must be the owner
 * (checked only by an assertion in debug configurations).  A nest level of
 * zero means this is the outermost release, which actually gives up the
 * mutex; otherwise only the nest level is decremented.
 */
void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  Thread_Control          *executing;
  unsigned int             nest_level;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );

  _Assert( mutex->Mutex.Queue.Queue.owner == executing );

  nest_level = mutex->nest_level;

  if ( __predict_true( nest_level == 0 ) ) {
    /* Outermost release: give up ownership (releases the queue lock). */
    _Mutex_Release_critical( &mutex->Mutex, executing, &queue_context );
  } else {
    /* Nested release: drop one nesting level and keep ownership. */
    mutex->nest_level = nest_level - 1;

    _Mutex_Queue_release( &mutex->Mutex, &queue_context );
  }
}
406
407#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */
Note: See TracBrowser for help on using the repository browser.